Julien Blanchon committed
Commit aef3da7 · 1 Parent(s): 98bf3ed
Files changed (2)
  1. app.py +1 -1
  2. requirements.txt +1 -1
app.py CHANGED
@@ -152,8 +152,8 @@ def generate_image(
     progress(0.1, desc="Loading text encoder...")
 
     # Load text encoder
-    text_encoder.to(device)
     text_encoder.set_attn_implementation("flash_attention_2")
+    text_encoder.to(device)
 
     # Encode prompt
     cap_features, cap_mask = encode_prompt(
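The change in app.py swaps the order of two calls: the attention backend is now selected while the encoder still sits on the CPU, and the weights are moved to the device afterwards. A minimal sketch of the resulting sequence, assuming a Transformers-style text encoder (the checkpoint name and dtype below are placeholders, not what the Space actually loads):

    import torch
    from transformers import AutoModel

    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Hypothetical checkpoint; the Space loads its own text encoder.
    text_encoder = AutoModel.from_pretrained(
        "org/text-encoder",          # placeholder name
        torch_dtype=torch.bfloat16,  # flash-attn kernels require fp16/bf16
    )

    # Order established by this commit: pick the attention implementation
    # first, then move the weights onto the target device.
    text_encoder.set_attn_implementation("flash_attention_2")
    text_encoder.to(device)
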
requirements.txt CHANGED
@@ -10,7 +10,7 @@ numpy
 Pillow
 safetensors
 tqdm
-# flash-attn @ https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.3.14/flash_attn-2.8.2+cu129torch2.8-cp310-cp310-linux_x86_64.whl
+flash-attn @ https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.3.14/flash_attn-2.8.2+cu129torch2.8-cp310-cp310-linux_x86_64.whl
 accelerate
 kernels
 timm
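In requirements.txt, the commit uncomments the flash-attn entry, installing it from a prebuilt wheel via a PEP 508 direct reference (name @ URL). The wheel's filename tags pin it to CPython 3.10 on linux_x86_64, built against CUDA 12.9 and torch 2.8; pip will refuse it on a different interpreter or platform, and a mismatched torch/CUDA build would surface only as an import error at runtime, so the wheel must match the Space's runtime image.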