ZennyKenny committed on
Commit b86f890 · verified · 1 Parent(s): 000f373

Update app.py

Files changed (1)
  1. app.py +17 -16
app.py CHANGED
@@ -20,7 +20,7 @@ MAX_IMAGE_SIZE = 2048
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# ✅ Load model only once
+# ✅ DO NOT CHANGE: Working pipeline using taef1
 taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
 
 pipe = DiffusionPipeline.from_pretrained(
@@ -40,7 +40,6 @@ def sanitize_filename(name):
 @spaces.GPU(duration=75)
 def infer(user_token, prompt, seed=42, randomize_seed=False, width=1024, height=1024,
           guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
-    # Authenticate using user's token for this session
     login(token=user_token)
 
     if randomize_seed:
@@ -58,24 +57,27 @@ def infer(user_token, prompt, seed=42, randomize_seed=False, width=1024, height=
         generator=generator,
         output_type="pil",
     ):
-        # Save low-quality JPG
         safe_name = sanitize_filename(prompt)
         img_path = f"image_preview/{safe_name}_{seed}.jpg"
         img.convert("RGB").save(img_path, "JPEG", quality=60)
 
-        # Collect previews
         previews = [f"image_preview/{f}" for f in sorted(os.listdir("image_preview")) if f.endswith(".jpg")]
         return img, seed, previews
 
-
-examples = [
-    ["your_token_here", "a man walking in the forest"],
-    ["your_token_here", "a viking ship sailing down a river"],
-    ["your_token_here", "a woman resting by an open fire"],
-    ["your_token_here", "a sword fight in a medieval village"]
+# Wrapper to inject a fallback token if needed
+def infer_with_fallback_token(user_token, prompt, *args):
+    if not user_token.strip():
+        user_token = "your_token_here"  # Replace with a real test token for dev, not in production
+    return infer(user_token, prompt, *args)
+
+# Prompt-only examples; token will be filled in by wrapper
+prompt_examples = [
+    "a man walking in the forest",
+    "a viking ship sailing down a river",
+    "a woman resting by an open fire",
+    "a sword fight in a medieval village"
 ]
 
-
 with gr.Blocks(css="style.css") as natalie_diffusion:
     with gr.Row():
         with gr.Column(scale=1, elem_id="left-column"):
@@ -118,14 +120,13 @@ Generate images in the surreal style of artist [Natalie Kav](https://www.behance
             result_example = gr.Image(visible=False)
 
             gr.Examples(
-                examples=examples,
-                fn=infer,
-                inputs=[hf_token_input, prompt],
+                examples=[[prompt] for prompt in prompt_examples],
+                fn=lambda prompt: infer_with_fallback_token("", prompt),
+                inputs=[prompt],
                 outputs=[result_example, seed, gr.Gallery(visible=False)],
                 cache_examples=False,
             )
 
-
         with gr.Column(scale=1, elem_id="right-column"):
             result = gr.Image(label="", show_label=False, elem_id="generated-image")
 
@@ -135,7 +136,7 @@ Generate images in the surreal style of artist [Natalie Kav](https://www.behance
 
     gr.on(
         triggers=[run_button.click, prompt.submit],
-        fn=infer,
+        fn=infer_with_fallback_token,
         inputs=[hf_token_input, prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
         outputs=[result, seed, gallery],
    )
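
For context, a minimal, dependency-free sketch of the dispatch pattern this commit introduces: gr.Examples now sends only a prompt through a lambda that passes an empty token, the main gr.on handler still forwards the token field and slider values, and infer_with_fallback_token substitutes a placeholder token whenever the field is blank. The infer() below is a stand-in stub, not the Space's real pipeline call; it only shows how the arguments flow.

# Sketch of the fallback-token dispatch added in this commit.
# infer() here is a hypothetical stub standing in for the Space's real function.

def infer(user_token, prompt, *args):
    # Report which token, prompt, and extra args reached the real handler.
    return f"infer(token={user_token!r}, prompt={prompt!r}, extra={args})"

def infer_with_fallback_token(user_token, prompt, *args):
    # Mirrors the committed wrapper: a blank or whitespace-only token is
    # replaced with a placeholder before delegating to infer().
    if not user_token.strip():
        user_token = "your_token_here"  # placeholder, as in the commit
    return infer(user_token, prompt, *args)

if __name__ == "__main__":
    # gr.Examples path: the lambda supplies an empty token plus the example prompt.
    print(infer_with_fallback_token("", "a viking ship sailing down a river"))

    # gr.on path: the token field plus (seed, randomize_seed, width, height,
    # guidance_scale, num_inference_steps) pass through *args unchanged.
    print(infer_with_fallback_token("hf_dummy_token", "a man walking in the forest",
                                    42, False, 1024, 1024, 3.5, 28))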