Spaces:
Running
on
Zero
Running
on
Zero
RageshAntony
committed on
added more args and optim
Browse files- check_app.py +25 -41
check_app.py
CHANGED
@@ -116,7 +116,7 @@ MODEL_CONFIGS = {
|
|
116 |
}
|
117 |
}
|
118 |
|
119 |
-
def generate_image_with_progress(model_name,pipe, prompt, num_steps, guidance_scale=None, seed=None, progress=gr.Progress(track_tqdm=True)):
|
120 |
generator = None
|
121 |
if seed is not None:
|
122 |
generator = torch.Generator("cuda").manual_seed(seed)
|
@@ -136,42 +136,29 @@ def generate_image_with_progress(model_name,pipe, prompt, num_steps, guidance_sc
|
|
136 |
has_guidance_scale = "guidance_scale" in pipe_signature.parameters
|
137 |
has_callback_on_step_end = "callback_on_step_end" in pipe_signature.parameters
|
138 |
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
num_inference_steps=num_steps,
|
163 |
-
generator=generator,
|
164 |
-
callback_on_step_end=callback,
|
165 |
-
).images[0]
|
166 |
-
elif not has_callback_on_step_end and not has_guidance_scale:
|
167 |
-
print("NO callback_on_step_end and NO guidance_scale")
|
168 |
-
image = pipe(
|
169 |
-
prompt,
|
170 |
-
num_inference_steps=num_steps,
|
171 |
-
generator=generator,
|
172 |
-
callback=callback,
|
173 |
-
callback_steps=1,
|
174 |
-
).images[0]
|
175 |
|
176 |
return image
|
177 |
|
@@ -179,9 +166,6 @@ def generate_image_with_progress(model_name,pipe, prompt, num_steps, guidance_sc
|
|
179 |
def create_pipeline_logic(prompt_text, model_name, negative_prompt="", seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=4.5, num_inference_steps=40,):
|
180 |
print(f"starting {model_name}")
|
181 |
progress = gr.Progress(track_tqdm=True)
|
182 |
-
num_steps = 30
|
183 |
-
guidance_scale = 7.5 # Example guidance scale, can be adjusted per model
|
184 |
-
seed = 42
|
185 |
config = MODEL_CONFIGS[model_name]
|
186 |
pipe_class = config["pipeline_class"]
|
187 |
pipe = None
|
@@ -201,7 +185,7 @@ def create_pipeline_logic(prompt_text, model_name, negative_prompt="", seed=42,
|
|
201 |
pipe = b_pipe
|
202 |
|
203 |
image = generate_image_with_progress(
|
204 |
-
model_name,pipe, prompt_text, num_steps=
|
205 |
)
|
206 |
return f"Seed: {seed}", image
|
207 |
|
|
|
116 |
}
|
117 |
}
|
118 |
|
119 |
+
def generate_image_with_progress(model_name,pipe, prompt, num_steps, guidance_scale=None, seed=None,negative_prompt, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
|
120 |
generator = None
|
121 |
if seed is not None:
|
122 |
generator = torch.Generator("cuda").manual_seed(seed)
|
|
|
136 |
has_guidance_scale = "guidance_scale" in pipe_signature.parameters
|
137 |
has_callback_on_step_end = "callback_on_step_end" in pipe_signature.parameters
|
138 |
|
139 |
+
# Define common arguments
|
140 |
+
common_args = {
|
141 |
+
"prompt": prompt,
|
142 |
+
"num_inference_steps": num_steps,
|
143 |
+
"negative_prompt": negative_prompt,
|
144 |
+
"width": width,
|
145 |
+
"height": height,
|
146 |
+
"generator": generator,
|
147 |
+
}
|
148 |
+
|
149 |
+
if has_guidance_scale:
|
150 |
+
common_args["guidance_scale"] = guidance_scale
|
151 |
+
|
152 |
+
if has_callback_on_step_end:
|
153 |
+
print("has callback_on_step_end and", "has guidance_scale" if has_guidance_scale else "NO guidance_scale")
|
154 |
+
common_args["callback_on_step_end"] = callback
|
155 |
+
else:
|
156 |
+
print("NO callback_on_step_end and", "has guidance_scale" if has_guidance_scale else "NO guidance_scale")
|
157 |
+
common_args["callback"] = callback
|
158 |
+
common_args["callback_steps"] = 1
|
159 |
+
|
160 |
+
# Generate image
|
161 |
+
image = pipe(**common_args).images[0]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
162 |
|
163 |
return image
|
164 |
|
|
|
166 |
def create_pipeline_logic(prompt_text, model_name, negative_prompt="", seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=4.5, num_inference_steps=40,):
|
167 |
print(f"starting {model_name}")
|
168 |
progress = gr.Progress(track_tqdm=True)
|
|
|
|
|
|
|
169 |
config = MODEL_CONFIGS[model_name]
|
170 |
pipe_class = config["pipeline_class"]
|
171 |
pipe = None
|
|
|
185 |
pipe = b_pipe
|
186 |
|
187 |
image = generate_image_with_progress(
|
188 |
+
model_name,pipe, prompt_text, num_steps=num_inference_steps, guidance_scale=guidance_scale, seed=seed,negative_prompt, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=progress
|
189 |
)
|
190 |
return f"Seed: {seed}", image
|
191 |
|