RageshAntony committed "added pipe logic"

check_app.py CHANGED (+8 -6)
@@ -37,17 +37,17 @@ class ProgressAuraFlowPipeline(DiffusionPipeline):
         self._num_inference_steps = num_inference_steps
         self._step = 0
 
-        def progress_callback(
+        def progress_callback(step_index, timestep, callback_kwargs):
             if callback and step_index % callback_steps == 0:
-
+                # Pass self (the pipeline) to the callback
+                callback(self, step_index, timestep, callback_kwargs)
             return callback_kwargs
 
         # Monkey patch the original pipeline's progress tracking
         original_step = self.original_pipeline.scheduler.step
         def wrapped_step(*args, **kwargs):
             self._step += 1
-
-            progress_callback(self, self._step, None, {})
+            progress_callback(self._step, None, {})
             return original_step(*args, **kwargs)
 
         self.original_pipeline.scheduler.step = wrapped_step
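The hunk above drives progress reporting by swapping the scheduler's bound step method for a counting wrapper. A minimal, self-contained sketch of that pattern (DummyScheduler and on_step are illustrative names, not from check_app.py):

# Wrap a scheduler's step() so each denoising step bumps a counter and
# fires a progress callback before delegating to the real method.
class DummyScheduler:
    def step(self, sample):
        return sample  # a real scheduler returns the denoised sample

def attach_step_counter(scheduler, on_step):
    original_step = scheduler.step  # bound method, captured in the closure
    state = {"step": 0}
    def wrapped_step(*args, **kwargs):
        state["step"] += 1
        on_step(state["step"])
        return original_step(*args, **kwargs)
    scheduler.step = wrapped_step  # instance attribute shadows the method
    return scheduler

sched = attach_step_counter(DummyScheduler(), lambda s: print(f"step {s}"))
for _ in range(3):
    sched.step(None)  # prints: step 1, step 2, step 3

Capturing original_step before the assignment is what prevents recursion: the closure keeps the original bound method while the new instance attribute shadows it.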
@@ -111,7 +111,7 @@ MODEL_CONFIGS = {
     }
 }
 
-def generate_image_with_progress(pipe, prompt, num_steps, guidance_scale=None, seed=None, progress=gr.Progress(track_tqdm=True)):
+def generate_image_with_progress(model_name, pipe, prompt, num_steps, guidance_scale=None, seed=None, progress=gr.Progress(track_tqdm=True)):
     generator = None
     if seed is not None:
         generator = torch.Generator("cuda").manual_seed(seed)
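The progress=gr.Progress(track_tqdm=True) default is Gradio's progress hook: when the function runs as an event handler, Gradio injects a live tracker, and track_tqdm=True additionally mirrors any tqdm bar (such as the one diffusers pipelines print) into the UI. A rough sketch of the same wiring on a trivial handler, assuming current Gradio behavior:

import gradio as gr

def slow_task(n_steps, progress=gr.Progress(track_tqdm=True)):
    n = int(n_steps)
    for i in range(n):
        # Manual updates take a float in [0, 1] plus an optional description.
        progress((i + 1) / n, desc=f"step {i + 1}/{n}")
    return "done"

demo = gr.Interface(fn=slow_task, inputs=gr.Number(value=10), outputs="text")
# demo.launch()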
@@ -162,6 +162,8 @@ def generate_image_with_progress(pipe, prompt, num_steps, guidance_scale=None, s
         prompt,
         num_inference_steps=num_steps,
         generator=generator,
+        callback=callback,
+        callback_steps=1,
     ).images[0]
 
     return image
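callback= and callback_steps= follow the older diffusers pipeline callback convention, with callback_steps=1 requesting a call on every step. Stock pipelines that still accept this legacy API invoke callback(step, timestep, latents); the wrapper in the first hunk instead prepends the pipeline itself. A sketch of a callback matching this file's four-argument contract:

# Matches the call made by progress_callback above: the wrapped pipeline
# passes itself first, and wrapped_step supplies timestep=None and
# callback_kwargs={} because the scheduler patch has no real values for them.
def callback(pipe, step_index, timestep, callback_kwargs):
    total = getattr(pipe, "_num_inference_steps", None)
    print(f"denoising step {step_index}" + (f" of {total}" if total else ""))
    return callback_kwargs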
@@ -194,7 +196,7 @@ def create_pipeline_logic(prompt_text, model_name):
     ).to("cuda")
 
     image = generate_image_with_progress(
-        pipe, prompt_text, num_steps=num_steps, guidance_scale=guidance_scale, seed=seed, progress=progress
+        model_name, pipe, prompt_text, num_steps=num_steps, guidance_scale=guidance_scale, seed=seed, progress=progress
     )
     return f"Seed: {seed}", image
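With the change above, model_name now leads the argument list. A hypothetical call site under the updated signature, where pipe stands for an already-loaded pipeline from this file and the prompt and step/guidance values are placeholders:

import torch

seed = torch.seed() % (2**32)  # record the seed so the UI can report it
image = generate_image_with_progress(
    model_name, pipe, "a watercolor fox",  # hypothetical prompt
    num_steps=30, guidance_scale=7.5, seed=seed,
)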