RageshAntony committed on
Commit e8ea8da · verified · 1 Parent(s): b4bf7d7

aura and lum progress

Files changed (1):
  1. check_app.py +31 -43
check_app.py CHANGED
@@ -8,48 +8,18 @@ from diffusers import (
     AuraFlowPipeline,
     Kandinsky3Pipeline,
     HunyuanDiTPipeline,
-    LuminaText2ImgPipeline
+    LuminaText2ImgPipeline, AutoPipelineForText2Image
 )
 import gradio as gr
 
 cache_dir = '/workspace/hf_cache'
 
 MODEL_CONFIGS = {
-    "FLUX": {
-        "repo_id": "black-forest-labs/FLUX.1-dev",
-        "pipeline_class": FluxPipeline,
-        "cache_dir": cache_dir,
-    },
-    "Stable Diffusion 3.5": {
-        "repo_id": "stabilityai/stable-diffusion-3.5-large",
-        "pipeline_class": StableDiffusion3Pipeline,
-        "cache_dir": cache_dir,
-    },
-    "PixArt": {
-        "repo_id": "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
-        "pipeline_class": PixArtSigmaPipeline,
-        "cache_dir": cache_dir,
-    },
-    "SANA": {
-        "repo_id": "Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers",
-        "pipeline_class": SanaPipeline,
-        "cache_dir": cache_dir,
-    },
     "AuraFlow": {
         "repo_id": "fal/AuraFlow",
         "pipeline_class": AuraFlowPipeline,
         "cache_dir": cache_dir,
     },
-    "Kandinsky": {
-        "repo_id": "kandinsky-community/kandinsky-3",
-        "pipeline_class": Kandinsky3Pipeline,
-        "cache_dir": cache_dir,
-    },
-    "Hunyuan": {
-        "repo_id": "Tencent-Hunyuan/HunyuanDiT-Diffusers",
-        "pipeline_class": HunyuanDiTPipeline,
-        "cache_dir": cache_dir,
-    },
     "Lumina": {
         "repo_id": "Alpha-VLLM/Lumina-Next-SFT-diffusers",
         "pipeline_class": LuminaText2ImgPipeline,
@@ -57,33 +27,44 @@ MODEL_CONFIGS = {
     }
 }
 
-def generate_image_with_progress(pipe, prompt, num_steps, guidance_scale=None, seed=None, progress=gr.Progress()):
+def generate_image_with_progress(pipe, prompt, num_steps, guidance_scale=None, seed=None, progress=gr.Progress(track_tqdm=True)):
     generator = None
     if seed is not None:
         generator = torch.Generator("cuda").manual_seed(seed)
 
     def callback(pipe, step_index, timestep, callback_kwargs):
-        print(f" callback => {pipe}, {step_index}, {timestep}")
+        print(f" callback => {step_index}, {timestep}")
         if step_index is None:
             step_index = 0
         cur_prg = step_index / num_steps
         progress(cur_prg, desc=f"Step {step_index}/{num_steps}")
         return callback_kwargs
 
-    if hasattr(pipe, "guidance_scale"):
+    if hasattr(pipe, "guidance_scale") and hasattr(pipe, "callback_on_step_end"):
         image = pipe(
             prompt,
             num_inference_steps=num_steps,
             guidance_scale=guidance_scale,
             callback_on_step_end=callback,
         ).images[0]
-    else:
+    elif not hasattr(pipe, "callback_on_step_end") and hasattr(pipe, "guidance_scale"):
+        print("NO callback_on_step_end")
+        image = pipe(
+            prompt,
+            num_inference_steps=num_steps,
+            guidance_scale=guidance_scale,
+        ).images[0]
+    elif hasattr(pipe, "callback_on_step_end") and not hasattr(pipe, "guidance_scale"):
         image = pipe(
             prompt,
             num_inference_steps=num_steps,
             generator=generator,
-            output_type="pil",
-            callback_on_step_end=callback,
+            callback_on_step_end=callback
+        ).images[0]
+    elif not hasattr(pipe, "callback_on_step_end") and not hasattr(pipe, "guidance_scale"):
+        image = pipe(
+            prompt,
+            num_inference_steps=num_steps,
         ).images[0]
 
     return image
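
Note on the new dispatch: callback_on_step_end is a keyword argument of the
pipeline's __call__, not an instance attribute, so hasattr(pipe,
"callback_on_step_end") will usually be False even for pipelines that do
support step-end callbacks (several pipelines do expose guidance_scale as a
property, but the callback generally is not one). A sketch of a
signature-based probe instead — accepts_kwarg is a hypothetical helper, not
part of this commit:

    import inspect

    def accepts_kwarg(pipe, name: str) -> bool:
        # Probe the __call__ signature rather than the object's attributes.
        return name in inspect.signature(pipe.__call__).parameters

    # e.g. build the call kwargs once instead of four hasattr branches:
    kwargs = {"num_inference_steps": num_steps}
    if accepts_kwarg(pipe, "guidance_scale"):
        kwargs["guidance_scale"] = guidance_scale
    if accepts_kwarg(pipe, "callback_on_step_end"):
        kwargs["callback_on_step_end"] = callback
    image = pipe(prompt, generator=generator, **kwargs).images[0]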
@@ -97,11 +78,18 @@ def create_pipeline_logic(prompt_text, model_name):
     seed = 42
     config = MODEL_CONFIGS[model_name]
     pipe_class = config["pipeline_class"]
-    pipe = pipe_class.from_pretrained(
-        config["repo_id"],
-        #cache_dir=config["cache_dir"],
-        torch_dtype=torch.bfloat16
-    ).to("cuda")
+    pipe = None
+    if model_name == "Kandinsky":
+        print("Kandinsky Special")
+        pipe = AutoPipelineForText2Image.from_pretrained(
+            "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16
+        )
+    else:
+        pipe = pipe_class.from_pretrained(
+            config["repo_id"],
+            #cache_dir=config["cache_dir"],
+            torch_dtype=torch.bfloat16
+        ).to("cuda")
     image = generate_image_with_progress(
         pipe, prompt_text, num_steps=num_steps, guidance_scale=guidance_scale, seed=seed, progress=progress
    )
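
One thing to watch in the Kandinsky branch: unlike the else branch, the
pipeline is never moved to the GPU, while generate_image_with_progress
always seeds torch.Generator("cuda"). A sketch of the device fix (assuming a
single-GPU machine; pipe.enable_model_cpu_offload() would be the lower-VRAM
alternative to .to("cuda")):

    pipe = AutoPipelineForText2Image.from_pretrained(
        "kandinsky-community/kandinsky-3",
        variant="fp16",
        torch_dtype=torch.float16,
    ).to("cuda")  # keep the model on the same device as the generator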