DM committed on
Commit a5e0580 · 1 Parent(s): f6cdac9

Update app.py

Files changed (1)
  1. app.py +51 -27
app.py CHANGED
@@ -1,29 +1,53 @@
- from diffusers import DiffusionPipeline
- import torch

- import torch
- from diffusers import DiffusionPipeline
- from diffusers import DPMSolverMultistepScheduler
- from torch import Generator

-
- path = 'segmind/portrait-finetuned' # Path to the appropriate model-type
- # Insert your prompt below.
- prompt = "Faceshot Portrait of pretty young (18-year-old) Caucasian wearing a high neck sweater, (masterpiece, extremely detailed skin, photorealistic, heavy shadow, dramatic and cinematic lighting, key light, fill light), sharp focus, BREAK epicrealism"
- # Insert negative prompt below. We recommend using this negative prompt for best results.
- negative_prompt = "(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck"
-
- torch.set_grad_enabled(False)
- torch.backends.cudnn.benchmark = True
-
- # Below code will run on gpu, please pass cpu everywhere as the device and set 'dtype' to torch.float32 for cpu inference.
- with torch.inference_mode():
-     gen = Generator("cuda")
-     gen.manual_seed(1674753452)
-     pipe = DiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16, safety_checker=None, requires_safety_checker=False)
-     pipe.to('cuda')
-     pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
-     pipe.unet.to(device='cuda', dtype=torch.float16, memory_format=torch.channels_last)
-
-     img = pipe(prompt=prompt,negative_prompt=negative_prompt, width=512, height=512, num_inference_steps=25, guidance_scale = 7, num_images_per_prompt=1, generator = gen).images[0]
-     img.save("image.png")
+ import os
+
+ import torch
+ import gradio as gr
+ from diffusers import DiffusionPipeline
+
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+ if torch.cuda.is_available():
+     # Configure the CUDA caching allocator; PYTORCH_CUDA_ALLOC_CONF only takes
+     # effect when set as an environment variable.
+     os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:6000'
+     torch.cuda.max_memory_allocated(device=device)  # peak-memory query; return value unused
+     torch.cuda.empty_cache()
+
+     # SDXL base pipeline (fp16) with memory-efficient attention.
+     pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
+     pipe.enable_xformers_memory_efficient_attention()
+     pipe = pipe.to(device)
+     torch.cuda.empty_cache()
+
+     # SDXL refiner, used to finish the latents produced by the base pipeline.
+     refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16")
+     refiner.enable_xformers_memory_efficient_attention()
+     refiner = refiner.to(device)
+     torch.cuda.empty_cache()
+
+     # Optional 2x latent upscaler (only loaded when CUDA is available).
+     upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
+     upscaler.enable_xformers_memory_efficient_attention()
+     upscaler = upscaler.to(device)
+     torch.cuda.empty_cache()
+ else:
+     # CPU fallback: full precision, no xformers, no upscaler.
+     pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", use_safetensors=True)
+     pipe = pipe.to(device)
+     refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True)
+     refiner = refiner.to(device)
+
+ def genie(prompt, negative_prompt, height, width, scale, steps, seed, upscaling):
+     generator = torch.Generator(device=device).manual_seed(seed)
+     # Run the base model and hand its latents to the refiner.
+     int_image = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=steps, height=height, width=width, guidance_scale=scale, num_images_per_prompt=1, generator=generator, output_type="latent").images
+     if upscaling == 'Yes' and device == 'cuda':  # the upscaler is only loaded on CUDA
+         # Refine, then pass the decoded image through the 2x latent upscaler.
+         image = refiner(prompt=prompt, image=int_image).images[0]
+         upscaled = upscaler(prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
+         torch.cuda.empty_cache()
+         return (image, upscaled)
+     else:
+         image = refiner(prompt=prompt, negative_prompt=negative_prompt, image=int_image).images[0]
+         torch.cuda.empty_cache()
+         return (image, image)
+
+ gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit. A Token is Any Word, Number, Symbol, or Punctuation. Everything Over 77 Will Be Truncated!'),
+     gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
+     gr.Slider(512, 1024, 768, step=128, label='Height'),
+     gr.Slider(512, 1024, 768, step=128, label='Width'),
+     gr.Slider(1, 15, 10, step=.25, label='Guidance Scale: How Closely the AI follows the Prompt'),
+     gr.Slider(25, maximum=100, value=50, step=25, label='Number of Iterations'),
+     gr.Slider(minimum=1, step=1, maximum=999999999999999999, randomize=True, label='Seed'),
+     gr.Radio(['Yes', 'No'], label='Upscale?')],
+     outputs=['image', 'image'],
+     title="Stable Diffusion XL 1.0 GPU",
+     description="SDXL 1.0 GPU. <br><br><b>WARNING: Capable of producing NSFW (Softcore) images.</b>",
+     article="If You Enjoyed this Demo and would like to Donate, you can send to any of these Wallets. <br>BTC: bc1qzdm9j73mj8ucwwtsjx4x4ylyfvr6kp7svzjn84 <br>3LWRoKYx6bCLnUrKEdnPo3FCSPQUSFDjFP <br>DOGE: DK6LRc4gfefdCTRk9xPD239N31jh9GjKez <br>SHIB (BEP20): 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>PayPal: https://www.paypal.me/ManjushriBodhisattva <br>ETH: 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>").launch(debug=True, max_threads=80)
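
The new genie() hands the base pipeline's latents (output_type="latent") straight to the refiner, which finishes denoising them and decodes the result. For comparison, diffusers also documents an explicit split of the noise schedule via denoising_end / denoising_start. The sketch below illustrates that documented alternative only; it is not what this commit's app.py does, it reuses the pipe and refiner objects loaded above, and the prompt, split fraction, and step count are placeholders.

    # Documented base/refiner split (illustrative; values are placeholders).
    split = 0.8   # fraction of the schedule run by the base model
    steps = 40    # total inference steps

    latents = pipe(prompt="a portrait photo",
                   num_inference_steps=steps,
                   denoising_end=split,
                   output_type="latent").images
    final = refiner(prompt="a portrait photo",
                    num_inference_steps=steps,
                    denoising_start=split,
                    image=latents).images[0]
    final.save("refined.png")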
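
A quick way to sanity-check the genie() wiring without the web UI is to call it directly once the pipelines are loaded. This is a minimal sketch, run in the same session as the definitions above; the prompt, seed, and resolution are placeholders, and note that executing app.py itself also starts the Gradio server because launch() runs at module level.

    # Hypothetical smoke test for genie(); all argument values are placeholders.
    image, upscaled = genie(
        prompt="a portrait photo of a hiker at sunrise",
        negative_prompt="blurry, low quality",
        height=768,
        width=768,
        scale=7.5,       # guidance scale
        steps=25,        # inference steps
        seed=12345,      # fixed seed for reproducibility
        upscaling="No",  # "Yes" additionally runs the 2x latent upscaler (CUDA only)
    )
    image.save("smoke_test.png")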