Update app.py
app.py CHANGED
@@ -127,7 +127,7 @@ pipe = load_and_prepare_model()
 
 text_encoder=CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='text_encoder',token=True)#.to(device=device, dtype=torch.bfloat16)
 text_encoder_2=CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='text_encoder_2',token=True)#.to(device=device, dtype=torch.bfloat16)
-
+
 MAX_SEED = np.iinfo(np.int32).max
 
 neg_prompt_2 = " 'non-photorealistic':1.5, 'unrealistic skin','unattractive face':1.3, 'low quality':1.1, ('dull color scheme', 'dull colors', 'digital noise':1.2),'amateurish', 'poorly drawn face':1.3, 'poorly drawn', 'distorted face', 'low resolution', 'simplistic' "
@@ -193,22 +193,22 @@ def generate_30(
     pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
 
-    text_inputs1 = tokenizer(
+    text_inputs1 = pipe.tokenizer(
         prompt,
         padding="max_length",
         max_length=77,
         truncation=True,
         return_tensors="pt",
     )
-    text_inputs2 = tokenizer(
+    text_inputs2 = pipe.tokenizer(
         prompt2,
         padding="max_length",
         max_length=77,
         truncation=True,
         return_tensors="pt",
     )
-    prompt_embedsa = text_encoder(text_input_ids.to(device), output_hidden_states=True)
-    prompt_embedsb = text_encoder(text_input_ids.to(device), output_hidden_states=True)
+    prompt_embedsa = pipe.text_encoder(text_input_ids.to(device), output_hidden_states=True)
+    prompt_embedsb = pipe.text_encoder(text_input_ids.to(device), output_hidden_states=True)
     prompt_embeds = torch.cat([prompt_embedsa,prompt_embedsb]).mean(dim=-1)
 
     options = {
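The change is a rename: the bare module-level `tokenizer` / `text_encoder` references become the pipeline's own `pipe.tokenizer` / `pipe.text_encoder`. Note that, in the hunk as shown, `text_input_ids` is never produced by the two tokenizer calls above it (they bind `text_inputs1` and `text_inputs2`), and both embedding passes go through the first encoder only. For comparison, below is a minimal sketch of the stock diffusers SDXL dual-encoder encoding pattern. It is not this Space's code; the `pipe`, `prompt`, `prompt2`, and `device` names are carried over from the diff purely for illustration, and `encode_prompt_sketch` is a hypothetical helper.

import torch

# Sketch of the usual SDXL two-encoder prompt encoding (diffusers-style).
# Assumes `pipe` exposes tokenizer, tokenizer_2, text_encoder, text_encoder_2,
# as a StableDiffusionXLPipeline does; this is not the commit's own logic.
def encode_prompt_sketch(pipe, prompt, prompt2, device):
    ids_1 = pipe.tokenizer(
        prompt,
        padding="max_length",
        max_length=pipe.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    ).input_ids.to(device)
    ids_2 = pipe.tokenizer_2(
        prompt2,
        padding="max_length",
        max_length=pipe.tokenizer_2.model_max_length,
        truncation=True,
        return_tensors="pt",
    ).input_ids.to(device)
    with torch.no_grad():
        out_1 = pipe.text_encoder(ids_1, output_hidden_states=True)
        out_2 = pipe.text_encoder_2(ids_2, output_hidden_states=True)
    # SDXL conditions on the penultimate hidden states of both encoders,
    # concatenated along the feature axis (768 + 1280 -> 2048 channels).
    prompt_embeds = torch.cat([out_1.hidden_states[-2], out_2.hidden_states[-2]], dim=-1)
    # The pooled text embedding comes from the projection encoder's output.
    pooled_prompt_embeds = out_2[0]
    return prompt_embeds, pooled_prompt_embeds

Recent diffusers releases also expose this logic directly as pipe.encode_prompt(prompt=..., prompt_2=...), which returns the per-token and pooled embeddings (plus their negative counterparts when classifier-free guidance is enabled).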