Spaces: Running on Zero
Update app.py
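Editor's note: this Space runs on ZeroGPU, so each GPU-bound stage in app.py is wrapped in a `@spaces.GPU(duration=...)` decorator, which requests a GPU only while the decorated call runs. A minimal standalone sketch of that pattern (the model and function below are illustrative, not part of this commit):

import spaces
import torch

model = torch.nn.Linear(8, 8)  # stand-in for a real pipeline, kept on CPU between calls

@spaces.GPU(duration=60)  # request a GPU allocation of up to 60 s for each call
def infer(x: torch.Tensor) -> torch.Tensor:
    # Move the model and inputs to the GPU only while this decorated call is running.
    return model.to("cuda")(x.to("cuda")).cpu()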
app.py CHANGED
@@ -2,73 +2,160 @@ import os
import shlex
import spaces
import subprocess

-#
def install_cuda_toolkit():
    CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/12.4.0/local_installers/cuda_12.4.0_550.54.14_linux.run"
-    CUDA_TOOLKIT_FILE = "/tmp

    os.environ["CUDA_HOME"] = "/usr/local/cuda"
-    os.environ["PATH"] = "
-    os.environ["LD_LIBRARY_PATH"] = "
-        os.environ["CUDA_HOME"],
-        "" if "LD_LIBRARY_PATH" not in os.environ else os.environ["LD_LIBRARY_PATH"],
-    )
    os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0;8.6"

install_cuda_toolkit()
os.system("pip list | grep torch")
os.system('nvcc -V')
-print("
-os.system("cd /home/user/app/step1x3d_texture/differentiable_renderer/ && python setup.py install")


-    Step1X3DTexturePipeline,
)
from step1x3d_geometry.models.pipelines.pipeline_utils import reduce_face, remove_degenerate_face

-# --- Configuration and model loading (unchanged) ---
parser = argparse.ArgumentParser()
-parser.add_argument(
-)
-parser.add_argument(
-    "--texture_model", type=str, default="Step1X-3D-Texture"
-)
parser.add_argument("--cache_dir", type=str, default="cache")
args = parser.parse_args()

os.makedirs(args.cache_dir, exist_ok=True)

geometry_model = Step1X3DGeometryPipeline.from_pretrained(
    "stepfun-ai/Step1X-3D", subfolder=args.geometry_model
-).to(

texture_model = Step1X3DTexturePipeline.from_pretrained("stepfun-ai/Step1X-3D", subfolder=args.texture_model)


-# ---

@spaces.GPU(duration=180)
-def generate_geometry(
-    input_image_path, guidance_scale, inference_steps, max_facenum, symmetry, edge_type
-):
    """
    """
    print("Iniciando generación de geometría...")
    if "Label" in args.geometry_model:
        symmetry_values = ["x", "asymmetry"]
@@ -95,26 +182,24 @@ def generate_geometry(

    torch.cuda.empty_cache()
    print(f"Geometría guardada en: {geometry_save_path}")
-    # Return the path both for display and for storing in the state
    return geometry_save_path, geometry_save_path

@spaces.GPU(duration=120)
def generate_texture(input_image_path, geometry_path):
    """
    """
    if not geometry_path or not os.path.exists(geometry_path):
        raise gr.Error("Por favor, primero genera la geometría antes de texturizar.")

    print(f"Iniciando texturizado para la malla: {geometry_path}")
    geometry_mesh = trimesh.load(geometry_path)

-    # Post-process the geometry before texturing
    geometry_mesh = remove_degenerate_face(geometry_mesh)
    geometry_mesh = reduce_face(geometry_mesh)

-    # Apply the texture
    textured_mesh = texture_model(input_image_path, geometry_mesh)

    save_name = os.path.basename(geometry_path).replace(".glb", "")
@@ -123,91 +208,75 @@ def generate_texture(input_image_path, geometry_path):

    torch.cuda.empty_cache()
    print(f"Malla texturizada guardada en: {textured_save_path}")
    return textured_save_path


-    gr.Markdown("# Step1X-3D")

    geometry_path_state = gr.State()

    with gr.Row():
        with gr.Column(scale=2):
-            )
-                label="Symmetry
-                value="
-                type="index",
-            )
-            edge_type = gr.Radio(
-                choices=["sharp", "normal", "smooth"],
-                label="Edge Type",
-                value="sharp",
-                type="value",
-            )

            with gr.Row():

        with gr.Column(scale=4):

        with gr.Column(scale=1):
            gr.Examples(
                examples=[
-                    ["examples/images/000.png"],
-                    ["examples/images/001.png"],
-                    ["examples/images/004.png"],
-                    ["examples/images/008.png"],
-                    ["examples/images/028.png"],
-                    ["examples/images/032.png"],
-                    ["examples/images/061.png"],
-                    ["examples/images/107.png"],
                ],
-                inputs=[input_image],
-                cache_examples=False
            )

-    # Lógica de

-        fn=generate_geometry,
-        inputs=[
-            input_image,
-            guidance_scale,
-            inference_steps,
-            max_facenum,
-            symmetry,
-            edge_type,
-        ],
-        outputs=[geometry_preview, geometry_path_state]
-    ).then(
-        # When the geometry finishes, update the interface
-        fn=lambda: {
-            btn_tex: gr.update(visible=True),  # show the texturing button
-            textured_preview: gr.update(value=None)  # clear the previous texture preview
-        },
-        outputs=[btn_tex, textured_preview]
    )

-        fn=generate_texture,
-        inputs=[input_image, geometry_path_state],
-        outputs=[textured_preview],
    )

-demo.launch(ssr_mode=False)
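The core build-step fix in this commit is replacing the brittle os.system("cd /home/user/app/... && python setup.py install") call, which assumes a hard-coded working directory, with subprocess.run(..., check=True, cwd=...) against paths derived from the location of app.py. A minimal sketch of that pattern (the directory name below is illustrative):

import os
import subprocess

# Resolve the extension directory relative to this file instead of assuming /home/user/app.
EXT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "some_extension")

# check=True raises CalledProcessError if the build fails; cwd replaces the shell `cd`.
subprocess.run("python setup.py install", shell=True, check=True, cwd=EXT_DIR)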
The full updated app.py after this commit (added lines are prefixed with +):

@@ -2,73 +2,160 @@ import os
import shlex
import spaces
import subprocess
+import time
+import uuid
+import torch
+import trimesh
+import argparse
+import numpy as np
+import gradio as gr
+from PIL import Image, ImageOps

+# --------------------------------------------------------------------------
+# 1. ENVIRONMENT SETUP AND C++/CUDA EXTENSION COMPILATION
+# --------------------------------------------------------------------------
def install_cuda_toolkit():
+    print("Verificando e instalando CUDA Toolkit si es necesario...")
    CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/12.4.0/local_installers/cuda_12.4.0_550.54.14_linux.run"
+    CUDA_TOOLKIT_FILE = f"/tmp/{os.path.basename(CUDA_TOOLKIT_URL)}"
+    if not os.path.exists("/usr/local/cuda/bin/nvcc"):
+        subprocess.run(["wget", "-q", CUDA_TOOLKIT_URL, "-O", CUDA_TOOLKIT_FILE], check=True)
+        subprocess.run(["chmod", "+x", CUDA_TOOLKIT_FILE], check=True)
+        subprocess.run([CUDA_TOOLKIT_FILE, "--silent", "--toolkit"], check=True)
+    else:
+        print("CUDA Toolkit ya está instalado.")

    os.environ["CUDA_HOME"] = "/usr/local/cuda"
+    os.environ["PATH"] = f"{os.environ['CUDA_HOME']}/bin:{os.environ['PATH']}"
+    os.environ["LD_LIBRARY_PATH"] = f"{os.environ['CUDA_HOME']}/lib:{os.environ.get('LD_LIBRARY_PATH', '')}"
    os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0;8.6"

install_cuda_toolkit()
+print("--- Versiones de librerías ---")
os.system("pip list | grep torch")
os.system('nvcc -V')
+print("-----------------------------")

+# --- CORRECTED PATHS ---
+# Build absolute paths based on the location of the app.py script
+APP_DIR = os.path.dirname(os.path.abspath(__file__))
+PROJECT_ROOT = os.path.join(APP_DIR, "Step1X-3D")
+DIFFERENTIABLE_RENDERER_PATH = os.path.join(PROJECT_ROOT, "step1x3d_texture/differentiable_renderer/")
+CUSTOM_RASTERIZER_WHL_PATH = os.path.join(PROJECT_ROOT, "custom_rasterizer-0.1-cp310-cp310-linux_x86_64.whl")

+print(f"Ruta del renderizador diferenciable: {DIFFERENTIABLE_RENDERER_PATH}")
+print(f"Ruta del rasterizador personalizado (.whl): {CUSTOM_RASTERIZER_WHL_PATH}")
+
+
+print("Compilando extensión C++/CUDA para el renderizador diferenciable...")
+# Use cwd (current working directory) to run the command in the right folder
+subprocess.run(
+    "python setup.py install",
+    shell=True, check=True, cwd=DIFFERENTIABLE_RENDERER_PATH
)
+
+print("Instalando la extensión C++/CUDA para el rasterizador personalizado...")
+subprocess.run(
+    shlex.split(f"pip install {CUSTOM_RASTERIZER_WHL_PATH}"),
+    check=True
+)
+print("Compilaciones finalizadas.")
+
+
+# --------------------------------------------------------------------------
+# 2. MAIN IMPORTS AND MODEL LOADING
+# --------------------------------------------------------------------------
+from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler
+from step1x3d_geometry.models.pipelines.pipeline import Step1X3DGeometryPipeline
+from step1x3d_texture.pipelines.step1x_3d_texture_synthesis_pipeline import Step1X3DTexturePipeline
from step1x3d_geometry.models.pipelines.pipeline_utils import reduce_face, remove_degenerate_face

parser = argparse.ArgumentParser()
+parser.add_argument("--geometry_model", type=str, default="Step1X-3D-Geometry-Label-1300m")
+parser.add_argument("--texture_model", type=str, default="Step1X-3D-Texture")
parser.add_argument("--cache_dir", type=str, default="cache")
args = parser.parse_args()

os.makedirs(args.cache_dir, exist_ok=True)
+device = "cuda" if torch.cuda.is_available() else "cpu"
+print(f"Usando dispositivo: {device}")

+# --- Step1X-3D model loading ---
+print("Cargando pipeline de geometría Step1X-3D...")
+geometry_model_path = os.path.join(PROJECT_ROOT)  # base path for from_pretrained
geometry_model = Step1X3DGeometryPipeline.from_pretrained(
    "stepfun-ai/Step1X-3D", subfolder=args.geometry_model
+).to(device)
+print("Pipeline de geometría cargado.")

+print("Cargando pipeline de texturizado Step1X-3D...")
+texture_model_path = os.path.join(PROJECT_ROOT)  # base path for from_pretrained
texture_model = Step1X3DTexturePipeline.from_pretrained("stepfun-ai/Step1X-3D", subfolder=args.texture_model)
+print("Pipeline de texturizado cargado.")


+# --- ControlNet models for sketch pre-processing ---
+print("Cargando modelos para el pre-procesamiento de bocetos (ControlNet)...")
+controlnet = ControlNetModel.from_pretrained("xinsir/controlnet-scribble-sdxl-1.0", torch_dtype=torch.float16)
+vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+pipe_control = StableDiffusionXLControlNetPipeline.from_pretrained(
+    "sd-community/sdxl-flash", controlnet=controlnet, vae=vae, torch_dtype=torch.float16
+)
+pipe_control.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_control.scheduler.config)
+pipe_control.to(device)
+print("Modelos de ControlNet cargados y listos.")
+
+
+# --------------------------------------------------------------------------
+# 3. BACKEND FUNCTIONS FOR EACH STAGE
+# --------------------------------------------------------------------------
+
+@spaces.GPU(duration=60)
+def enhance_sketch(image, prompt):
+    """
+    Step 0: Turn a sketch into a high-quality image using ControlNet.
+    """
+    if image is None:
+        raise gr.Error("Por favor, proporciona un boceto de entrada.")
+
+    input_image = image.convert("RGB")
+
+    width, height = input_image.size
+    ratio = np.sqrt(1024.0 * 1024.0 / (width * height))
+    new_width, new_height = int(width * ratio), int(height * ratio)
+    input_image = input_image.resize((new_width, new_height))
+    input_image = ImageOps.invert(input_image)
+
+    final_prompt = f"professional 3d model {prompt} . octane render, highly detailed, volumetric, dramatic lighting"
+    negative_prompt = "ugly, deformed, noisy, low poly, blurry, painting"
+
+    print(f"Mejorando boceto con prompt: '{final_prompt}'")
+    output_image = pipe_control(
+        prompt=final_prompt,
+        negative_prompt=negative_prompt,
+        image=input_image,
+        num_inference_steps=20,
+        controlnet_conditioning_scale=0.85,
+        guidance_scale=5.0,
+        width=new_width,
+        height=new_height,
+    ).images[0]
+
+    save_name = str(uuid.uuid4())
+    processed_image_path = f"{args.cache_dir}/{save_name}_processed.png"
+    output_image.save(processed_image_path)
+
+    print(f"Boceto mejorado y guardado en: {processed_image_path}")
+    return processed_image_path, processed_image_path

@spaces.GPU(duration=180)
+def generate_geometry(input_image_path, guidance_scale, inference_steps, max_facenum, symmetry, edge_type):
    """
+    Step 1: Generate the geometry from an image.
    """
+    if not input_image_path or not os.path.exists(input_image_path):
+        raise gr.Error("Primero debes proporcionar una imagen o mejorar un boceto.")
+
    print("Iniciando generación de geometría...")
    if "Label" in args.geometry_model:
        symmetry_values = ["x", "asymmetry"]
@@ -95,26 +182,24 @@ def generate_geometry(

    torch.cuda.empty_cache()
    print(f"Geometría guardada en: {geometry_save_path}")
    return geometry_save_path, geometry_save_path

@spaces.GPU(duration=120)
def generate_texture(input_image_path, geometry_path):
    """
+    Step 2: Apply the texture to an existing geometry.
    """
    if not geometry_path or not os.path.exists(geometry_path):
        raise gr.Error("Por favor, primero genera la geometría antes de texturizar.")
+    if not input_image_path or not os.path.exists(input_image_path):
+        raise gr.Error("Se necesita una imagen de entrada para el texturizado.")

    print(f"Iniciando texturizado para la malla: {geometry_path}")
    geometry_mesh = trimesh.load(geometry_path)

    geometry_mesh = remove_degenerate_face(geometry_mesh)
    geometry_mesh = reduce_face(geometry_mesh)

    textured_mesh = texture_model(input_image_path, geometry_mesh)

    save_name = os.path.basename(geometry_path).replace(".glb", "")
@@ -123,91 +208,75 @@ def generate_texture(input_image_path, geometry_path):

    torch.cuda.empty_cache()
    print(f"Malla texturizada guardada en: {textured_save_path}")
    return textured_save_path

+# --------------------------------------------------------------------------
+# 4. GRADIO USER INTERFACE
+# --------------------------------------------------------------------------

+with gr.Blocks(title="Step1X-3D: Flujo de Boceto a 3D") as demo:
+    gr.Markdown("# Step1X-3D: Flujo de Boceto a 3D")
+    gr.Markdown("Flujo de trabajo inspirado en TRELLIS. **Paso 0 (Opcional):** Sube un boceto y un prompt para generar una imagen de referencia. **Paso 1:** Genera la geometría 3D a partir de la imagen. **Paso 2:** Genera la textura para la geometría.")

+    processed_image_path_state = gr.State()
    geometry_path_state = gr.State()

    with gr.Row():
        with gr.Column(scale=2):
+            gr.Markdown("### **Entrada Principal**")
+            input_image = gr.Image(label="Sube tu boceto o imagen de referencia", type="pil", image_mode="RGBA")
+            prompt = gr.Textbox(label="Prompt (describe tu objeto)", value="a detailed medieval sword")
+
+            with gr.Accordion(label="Opciones de Generación 3D", open=True):
+                guidance_scale = gr.Number(label="Guidance Scale (3D)", value="7.5")
+                inference_steps = gr.Slider(label="Inference Steps (3D)", minimum=1, maximum=100, value=50, step=1)
+                max_facenum = gr.Number(label="Max Face Num", value="400000")
+                symmetry = gr.Radio(choices=["Symmetry", "Asymmetry"], label="Symmetry", value="Symmetry", type="index")
+                edge_type = gr.Radio(choices=["sharp", "normal", "smooth"], label="Edge Type", value="sharp", type="value")

            with gr.Row():
+                btn_enhance = gr.Button("Paso 0: Mejorar Boceto")
+            with gr.Row():
+                btn_geo = gr.Button("Paso 1: Generar Geometría", interactive=False)
+                btn_tex = gr.Button("Paso 2: Generar Textura", visible=False, interactive=False)

        with gr.Column(scale=4):
+            gr.Markdown("### **Resultados**")
+            processed_image_preview = gr.Image(label="Boceto Mejorado (Entrada para 3D)", type="filepath", interactive=False)
+            textured_preview = gr.Model3D(label="Modelo Texturizado Final", height=380, clear_color=[0.0, 0.0, 0.0, 0.0])
+            geometry_preview = gr.Model3D(label="Modelo Geométrico", height=380, clear_color=[0.0, 0.0, 0.0, 0.0])

        with gr.Column(scale=1):
            gr.Examples(
                examples=[
+                    [os.path.join(PROJECT_ROOT, "examples/images/000.png"), "a toy car"],
+                    [os.path.join(PROJECT_ROOT, "examples/images/001.png"), "a red fire hydrant"],
+                    [os.path.join(PROJECT_ROOT, "examples/images/004.png"), "a detailed, ornate, golden picture frame"],
                ],
+                inputs=[input_image, prompt],
+                cache_examples=False
            )

+    # --- Interface logic ---
+
+    def handle_image_upload(img):
+        if img is None:
+            return None, gr.update(interactive=False)
+        save_name = str(uuid.uuid4())
+        original_path = f"{args.cache_dir}/{save_name}_original.png"
+        img.save(original_path)
+        return original_path, gr.update(interactive=True)
+
+    input_image.upload(fn=handle_image_upload, inputs=[input_image], outputs=[processed_image_path_state, btn_geo])

+    btn_enhance.click(fn=enhance_sketch, inputs=[input_image, prompt], outputs=[processed_image_preview, processed_image_path_state]).then(
+        fn=lambda: gr.update(interactive=True), outputs=[btn_geo]
    )

+    btn_geo.click(fn=generate_geometry, inputs=[processed_image_path_state, guidance_scale, inference_steps, max_facenum, symmetry, edge_type], outputs=[geometry_preview, geometry_path_state]).then(
+        fn=lambda: {btn_tex: gr.update(visible=True, interactive=True), textured_preview: gr.update(value=None)}, outputs=[btn_tex, textured_preview]
    )
+
+    btn_tex.click(fn=generate_texture, inputs=[processed_image_path_state, geometry_path_state], outputs=[textured_preview])

+demo.launch(ssr_mode=False, share=True)
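The new UI wires the three stages together with Gradio's event chaining: each button's .click() feeds file paths into gr.State components, and a .then() callback reveals or enables the next button. A distilled, self-contained sketch of that pattern (component names and the placeholder function are illustrative, not the Space's real components):

import gradio as gr

def make_geometry(path):
    # Stand-in for generate_geometry: returns the same value for the preview and the state.
    return path, path

with gr.Blocks() as demo:
    geometry_state = gr.State()
    inp = gr.Textbox(label="Input path")
    preview = gr.Textbox(label="Geometry result")
    btn_geo = gr.Button("Step 1: Generate geometry")
    btn_tex = gr.Button("Step 2: Generate texture", visible=False, interactive=False)

    # When step 1 finishes, .then() reveals and enables the step 2 button.
    btn_geo.click(fn=make_geometry, inputs=[inp], outputs=[preview, geometry_state]).then(
        fn=lambda: gr.update(visible=True, interactive=True), outputs=[btn_tex]
    )

demo.launch()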