asahi417 committed
Commit 4fc0c6f · verified · 1 parent: 1ede02f

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/examples/demo19.jpg filter=lfs diff=lfs merge=lfs -text
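
(The new attribute tracks the added demo image with Git LFS, so the repository stores a small pointer file rather than the full binary.)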
README.md CHANGED
@@ -1,12 +1,13 @@
 ---
-title: Stable Depth2image V1
-emoji: 🌍
+title: Stable Depth2Image V1
+emoji: 🦴
 colorFrom: blue
-colorTo: red
+colorTo: indigo
 sdk: gradio
-sdk_version: 4.39.0
+sdk_version: 4.36.0
 app_file: app.py
 pinned: false
+license: apache-2.0
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
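
The pinned `sdk_version: 4.36.0` now matches the `gradio==4.36.0` pin in `requirements.txt` below, keeping the Space runtime and the declared dependency in sync.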
app.py ADDED
@@ -0,0 +1,118 @@
+import gradio as gr
+import numpy as np
+import random
+import os
+from PIL import Image
+import spaces
+import torch
+from transformers import pipeline
+from diffusers import StableDiffusionDepth2ImgPipeline
+
+
+model_id_depth2image = "stabilityai/stable-diffusion-2-depth"
+if torch.cuda.is_available():
+    pipe_depth2image = StableDiffusionDepth2ImgPipeline.from_pretrained(model_id_depth2image, torch_dtype=torch.float16).to("cuda")
+else:
+    pipe_depth2image = StableDiffusionDepth2ImgPipeline.from_pretrained(model_id_depth2image)
+max_seed = np.iinfo(np.int32).max
+max_image_size = 1344
+example_files = [os.path.join('assets/examples', filename) for filename in sorted(os.listdir('assets/examples'))]
+
+
+@spaces.GPU
+def infer(
+        init_image,
+        prompt,
+        negative_prompt,
+        seed,
+        randomize_seed,
+        width,
+        height,
+        guidance_scale,
+        num_inference_steps):
+    if randomize_seed:
+        seed = random.randint(0, max_seed)
+    init_image = Image.fromarray(np.uint8(init_image))
+    image = pipe_depth2image(
+        prompt=prompt,
+        image=init_image,
+        negative_prompt=negative_prompt,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
+        height=height,
+        width=width,
+        generator=torch.Generator().manual_seed(seed)
+    ).images[0]
+    return image, seed
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Demo [Depth2Image](https://huggingface.co/stabilityai/stable-diffusion-2-depth).")
+    with gr.Row():
+        prompt = gr.Text(
+            label="Prompt",
+            show_label=True,
+            max_lines=1,
+            placeholder="Enter your prompt",
+            container=False,
+        )
+        run_button = gr.Button("Run", scale=0)
+    with gr.Row():
+        init_image = gr.Image(label="Input Image", type='numpy')
+        result = gr.Image(label="Result")
+    with gr.Accordion("Advanced Settings", open=False):
+        negative_prompt = gr.Text(
+            label="Negative Prompt",
+            max_lines=1,
+            placeholder="Enter a negative prompt",
+        )
+        seed = gr.Slider(
+            label="Seed",
+            minimum=0,
+            maximum=max_seed,
+            step=1,
+            value=0,
+        )
+        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+        with gr.Row():
+            width = gr.Slider(
+                label="Width",
+                minimum=256,
+                maximum=max_image_size,
+                step=64,
+                value=1024,
+            )
+            height = gr.Slider(
+                label="Height",
+                minimum=256,
+                maximum=max_image_size,
+                step=64,
+                value=1024,
+            )
+        with gr.Row():
+            guidance_scale = gr.Slider(
+                label="Guidance scale",
+                minimum=0.0,
+                maximum=10.0,
+                step=0.1,
+                value=7.5,
+            )
+            num_inference_steps = gr.Slider(
+                label="Number of inference steps",
+                minimum=1,
+                maximum=50,
+                step=1,
+                value=50,
+            )
+    gr.on(
+        triggers=[run_button.click, prompt.submit, negative_prompt.submit],
+        fn=infer,
+        inputs=[init_image, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+        outputs=[result, seed]
+    )
+    examples = gr.Examples(
+        examples=example_files, inputs=[init_image], outputs=[result, seed]
+    )
+
+
+demo.queue().launch()
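
For reference, the core call inside `infer` can be exercised outside Gradio. A minimal sketch, assuming a CUDA GPU for fp16; the prompts and file paths here are placeholders:

```python
import torch
from PIL import Image
from diffusers import StableDiffusionDepth2ImgPipeline

# Same depth-conditioned checkpoint the Space loads at startup.
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth",
    torch_dtype=torch.float16,
).to("cuda")

init_image = Image.open("assets/examples/demo01.jpg")  # any input photo

result = pipe(
    prompt="a watercolor painting",              # placeholder prompt
    image=init_image,
    negative_prompt="blurry, low quality",       # placeholder negative prompt
    guidance_scale=7.5,
    num_inference_steps=50,
    generator=torch.Generator().manual_seed(0),  # fixed seed for reproducibility
).images[0]
result.save("output.png")
```

Unlike the app, this sketch omits explicit `height`/`width`, so the pipeline derives the output size from the input image.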
assets/examples/demo01.jpg ADDED
assets/examples/demo02.jpg ADDED
assets/examples/demo03.jpg ADDED
assets/examples/demo04.jpg ADDED
assets/examples/demo05.jpg ADDED
assets/examples/demo06.jpg ADDED
assets/examples/demo07.jpg ADDED
assets/examples/demo08.jpg ADDED
assets/examples/demo09.jpg ADDED
assets/examples/demo10.jpg ADDED
assets/examples/demo11.jpg ADDED
assets/examples/demo12.jpg ADDED
assets/examples/demo13.jpg ADDED
assets/examples/demo14.jpg ADDED
assets/examples/demo15.jpg ADDED
assets/examples/demo16.jpg ADDED
assets/examples/demo17.jpg ADDED
assets/examples/demo18.jpg ADDED
assets/examples/demo19.jpg ADDED

Git LFS Details

  • SHA256: 7cdb09c34eb0b4d2ac5f6070aec47c8f983a0b1b2c9ee1fc30decafb64f1bd98
  • Pointer size: 132 Bytes
  • Size of remote file: 1 MB
assets/examples/demo20.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,6 @@
+git+https://github.com/huggingface/diffusers.git
+transformers
+accelerate
+sentencepiece
+gradio==4.36.0
+torch
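
With these pins, `pip install -r requirements.txt` followed by `python app.py` should reproduce the Space locally. Note that the `spaces` package imported at the top of `app.py` is provided by the Hugging Face Spaces runtime; when running elsewhere it can be installed separately (`pip install spaces`).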