import huggingface_hub
import os

# Log in to the Hugging Face Hub with the token exposed via the HF_TOKEN environment variable
hf_token = os.getenv('HF_TOKEN')
huggingface_hub.login(hf_token)
###
DEVICE = 'cuda'
###
from diffusers import StableDiffusionPipeline, DDIMScheduler
import torch

# Load the DreamBooth-fine-tuned Stable Diffusion checkpoint in fp16 and switch to the DDIM scheduler
model_path = 'Arkan0ID/dreambooth-dmitry-thumbs-up'
pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16).to(DEVICE)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.enable_xformers_memory_efficient_attention()
###
from transformers import CLIPProcessor, CLIPModel

# CLIP is used below to re-rank the generated images by similarity to the text 'thumbs up'
model = CLIPModel.from_pretrained('openai/clip-vit-large-patch14')
processor = CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14')
###
import gradio as gr
import numpy as np
from torch.nn.functional import cosine_similarity


def inference(prompt, negative_prompt, num_samples, height, width, num_inference_steps, guidance_scale, seed, top_k):
    # Fixed seed for reproducible sampling
    g_cuda = torch.Generator(DEVICE)
    g_cuda.manual_seed(int(seed))
    with torch.autocast(DEVICE), torch.inference_mode():
        images = pipe(
            prompt, height=int(height), width=int(width),
            # Prepend a baseline negative prompt (comma-separated) to whatever the user supplies
            negative_prompt='ugly nerd with distorted fingers, ' + negative_prompt,
            num_images_per_prompt=int(num_samples),
            num_inference_steps=int(num_inference_steps),
            guidance_scale=guidance_scale,
            generator=g_cuda
        ).images
    # Score each generated image with CLIP against the text 'thumbs up'
    res = []
    for image in images:
        inputs = processor(text='thumbs up', images=image, return_tensors='pt', padding=True)
        outputs = model(**inputs)
        res.append(cosine_similarity(outputs.text_embeds, outputs.image_embeds).detach().item())
    # Keep only the top_k images with the highest CLIP similarity
    idx = np.argsort(res)
    return [images[i] for i in idx][-int(top_k):]
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label='Prompt', value='photo of <Dmitry> <thumbs-up>')
            negative_prompt = gr.Textbox(label='Negative Prompt', value='')
            run = gr.Button(value='Generate')
            guidance_scale = gr.Number(label='Guidance Scale', value=7.5)
            with gr.Row():
                num_samples = gr.Number(label='Number of Samples', value=4)
                top_k = gr.Number(label='Take best K', value=1)
            with gr.Row():
                height = gr.Number(label='Height', value=768)
                width = gr.Number(label='Width', value=768)
            with gr.Row():
                num_inference_steps = gr.Slider(label='Steps', value=50)
                seed = gr.Number(label='Seed', value=42)
        with gr.Column():
            gallery = gr.Gallery()
    run.click(
        inference,
        inputs=[prompt, negative_prompt, num_samples, height, width, num_inference_steps, guidance_scale, seed, top_k],
        outputs=gallery
    )

demo.launch()
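
# Note: this app assumes torch with CUDA, diffusers, transformers, xformers, gradio and
# huggingface_hub are installed, and that HF_TOKEN is available in the environment
# (e.g. as a Space secret) so the login call at the top succeeds.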