Yaron Koresh committed
Update app.py
app.py CHANGED
@@ -6,7 +6,7 @@ import numpy as np
import spaces
import random
import string
-from diffusers import
+from diffusers import AutoPipelineForText2Image
import torch
from pathos.multiprocessing import ProcessingPool as ProcessPoolExecutor
import requests
@@ -16,15 +16,16 @@ pool = ProcessPoolExecutor(4)
pool.__enter__()

#model_id = "runwayml/stable-diffusion-v1-5"
-model_id = "stabilityai/stable-diffusion-3-medium-diffusers"
+#model_id = "stabilityai/stable-diffusion-3-medium-diffusers"
+model_id = "kandinsky-community/kandinsky-2-2-decoder"

device = "cuda" if torch.cuda.is_available() else "cpu"
if torch.cuda.is_available():
    torch.cuda.max_memory_allocated(device=device)
-    pipe =
+    pipe = AutoPipelineForText2Image.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16", use_safetensors=True, token=os.getenv('hf_token'))
    pipe = pipe.to(device)
else:
-    pipe =
+    pipe = AutoPipelineForText2Image.from_pretrained(model_id, use_safetensors=True, token=os.getenv('hf_token'))
    pipe = pipe.to(device)

def translate(text,lang):
@@ -74,16 +75,16 @@ def generate_random_string(length):
def Piper(_do):
    return pipe(
        _do,
-        height=
-        width=
-        negative_prompt='
-        num_inference_steps=
-        guidance_scale=
+        height=768,
+        width=512,
+        negative_prompt='ugly, deformed, disfigured, poor details, bad anatomy, labels, texts, logos',
+        num_inference_steps=50,
+        guidance_scale=8.5
    )

def infer(prompt):
    name = generate_random_string(12)+".png"
-    _do = f'
+    _do = f'{ translate(prompt,"english") }, cold color palette, muted colors, detailed, 8k'.lower()
    image = Piper(_do).images[0].save(name)
    return name
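For context, a minimal standalone sketch of the text-to-image path this commit sets up: loading the Kandinsky 2.2 decoder through diffusers' AutoPipelineForText2Image and generating an image with the same prompt styling and sampling parameters as Piper()/infer(). This is illustrative only; the Space's translate() helper, the hf_token environment variable, and the fp16 variant flag are omitted, and the example prompt and random file naming are assumptions.

import random
import string

import torch
from diffusers import AutoPipelineForText2Image

# Same model the commit switches to
model_id = "kandinsky-community/kandinsky-2-2-decoder"
device = "cuda" if torch.cuda.is_available() else "cpu"

# Half precision on GPU, full precision on CPU (mirrors the updated app.py)
dtype = torch.float16 if device == "cuda" else torch.float32
pipe = AutoPipelineForText2Image.from_pretrained(
    model_id, torch_dtype=dtype, use_safetensors=True
).to(device)

def infer(prompt: str) -> str:
    # Random 12-character file name, like generate_random_string(12) in the Space
    name = "".join(random.choices(string.ascii_letters, k=12)) + ".png"
    # Prompt styling applied by infer() in app.py, minus the translation step
    styled = f"{prompt}, cold color palette, muted colors, detailed, 8k".lower()
    image = pipe(
        styled,
        height=768,
        width=512,
        negative_prompt="ugly, deformed, disfigured, poor details, bad anatomy, labels, texts, logos",
        num_inference_steps=50,
        guidance_scale=8.5,
    ).images[0]
    image.save(name)
    return name

if __name__ == "__main__":
    print(infer("a lighthouse on a stormy coast"))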