Spaces: Runtime error

fix error

check_app.py  CHANGED  +18 -23
@@ -89,27 +89,23 @@ def generate_image_with_progress(pipe, prompt, num_steps, guidance_scale=None, s
     return image
 
 @spaces.GPU(duration=170)
-def create_pipeline_logic(model_name, config):
-    …
-        return f"Seed: {seed}", image
-
-    return start_process
-
+def create_pipeline_logic(prompt_text, model_name, config):
+    print(f"starting {model_name}")
+    progress = gr.Progress()
+    num_steps = 30
+    guidance_scale = 7.5  # Example guidance scale, can be adjusted per model
+    seed = 42
+
+    pipe_class = config["pipeline_class"]
+    pipe = pipe_class.from_pretrained(
+        config["repo_id"],
+        #cache_dir=config["cache_dir"],
+        torch_dtype=torch.bfloat16
+    ).to("cuda")
+    image = generate_image_with_progress(
+        pipe, prompt_text, num_steps=num_steps, guidance_scale=guidance_scale, seed=seed, progress=progress
+    )
+    return f"Seed: {seed}", image
 
 def main():
     with gr.Blocks() as app:
@@ -123,8 +119,7 @@ def main():
             output = gr.Textbox(label="Status")
             img = gr.Image(label=model_name, height=300)
 
-            …
-            button.click(fn=start_process, inputs=[prompt_text], outputs=[output, img])
+            button.click(fn=create_pipeline_logic, inputs=[prompt_text, model_name, config], outputs=[output, img])
 
     app.launch()
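
For context on the new create_pipeline_logic: it reads only the "pipeline_class" and "repo_id" keys from config (a "cache_dir" key is referenced but commented out), moves the model to CUDA in bfloat16, and hands generation off to generate_image_with_progress. A minimal sketch of what one such config entry could look like, assuming a diffusers pipeline; the class, repo ID, and prompt below are illustrative choices, not taken from this Space:

    import torch
    from diffusers import StableDiffusionXLPipeline

    # Hypothetical config entry: only "pipeline_class" and "repo_id" are read
    # by create_pipeline_logic in this commit; "cache_dir" stays commented out.
    config = {
        "pipeline_class": StableDiffusionXLPipeline,
        "repo_id": "stabilityai/stable-diffusion-xl-base-1.0",
        # "cache_dir": "./model_cache",
    }

    # Same loading pattern as the diff, minus the Gradio progress plumbing.
    pipe = config["pipeline_class"].from_pretrained(
        config["repo_id"],
        torch_dtype=torch.bfloat16,
    ).to("cuda")
    image = pipe("a watercolor fox", num_inference_steps=30, guidance_scale=7.5).images[0]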
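
One note on the second hunk: Button.click expects every entry in inputs to be a Gradio component, while model_name and config are plain Python values from the surrounding code. A common way to bind such fixed arguments is functools.partial; this is only a sketch of that pattern under those assumptions, not something this commit does:

    from functools import partial

    # Bind the per-model constants up front so that Gradio only supplies the
    # prompt textbox value when the button is clicked.
    handler = partial(create_pipeline_logic, model_name=model_name, config=config)
    button.click(fn=handler, inputs=[prompt_text], outputs=[output, img])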