Spaces · Running on A10G
redbrain committed · d4fa03e
1 Parent(s): 5771d32

Initial everything

Files changed:
- README.md +2 -2
- app.py +150 -0
- requirements.txt +3 -0
README.md CHANGED
@@ -1,7 +1,7 @@
 ---
 title: Dendrokronos
 emoji: 🌳
-colorFrom:
+colorFrom: green
 colorTo: green
 sdk: gradio
 sdk_version: 4.8.0
@@ -10,4 +10,4 @@ pinned: false
 license: mit
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,150 @@
# import all the libraries
import math
import numpy as np
import scipy.stats  # the stats subpackage needs an explicit import
from PIL import Image
import torch
import torchvision.transforms as tforms
from diffusers import DiffusionPipeline, DDIMScheduler, DDIMInverseScheduler
from diffusers.models import AutoencoderKL
import gradio as gr

# load SDXL pipeline
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", vae=vae, torch_dtype=torch.float16)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

# optimize for speed
pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=True)  # hopefully this works on Ampere series GPUs
pipe(prompt="an astronaut riding a green horse", num_inference_steps=25)  # force lengthy JIT compilation to happen ahead of time

# watermarking helper functions, paraphrased from the reference impl of arXiv:2305.20030

def circle_mask(size=128, r=16, x_offset=0, y_offset=0):
    # boolean disk of radius r, centered on the (optionally offset) middle of a size x size grid
    x0 = y0 = size // 2
    x0 += x_offset
    y0 += y_offset
    y, x = np.ogrid[:size, :size]
    y = y[::-1]
    return ((x - x0) ** 2 + (y - y0) ** 2) <= r ** 2

def get_pattern(shape, w_seed=999999):
    g = torch.Generator(device=pipe.device)
    g.manual_seed(w_seed)
    gt_init = pipe.prepare_latents(1, pipe.unet.in_channels,
                                   1024, 1024,
                                   pipe.unet.dtype, pipe.device, g)
    gt_patch = torch.fft.fftshift(torch.fft.fft2(gt_init), dim=(-1, -2))
    # ring pattern. paper found this to be effective
    gt_patch_tmp = gt_patch.clone().detach()
    for i in range(shape[-1] // 2, 0, -1):
        tmp_mask = circle_mask(gt_init.shape[-1], r=i)
        tmp_mask = torch.tensor(tmp_mask)
        for j in range(gt_patch.shape[1]):
            # make each concentric ring a constant value, taken from the seeded pattern
            gt_patch[:, j, tmp_mask] = gt_patch_tmp[0, j, 0, i].item()
    return gt_patch

def transform_img(image):
    tform = tforms.Compose([tforms.Resize(1024), tforms.CenterCrop(1024), tforms.ToTensor()])
    image = tform(image)
    return 2.0 * image - 1.0

# hyperparameters
shape = (1, 4, 128, 128)
w_seed = 7433  # TREE :)
w_channel = 0
w_radius = 16  # the suggested r from section 4.4 of the paper

# get w_key and w_mask
np_mask = circle_mask(shape[-1], r=w_radius)
torch_mask = torch.tensor(np_mask).to(pipe.device)
w_mask = torch.zeros(shape, dtype=torch.bool).to(pipe.device)
w_mask[:, w_channel] = torch_mask
w_key = get_pattern(shape, w_seed=w_seed).to(pipe.device)


def get_noise():
    # moved w_key and w_mask to globals

    # inject watermark
    init_latents = pipe.prepare_latents(1, pipe.unet.in_channels,
                                        1024, 1024,
                                        pipe.unet.dtype, pipe.device, None)
    init_latents_fft = torch.fft.fftshift(torch.fft.fft2(init_latents), dim=(-1, -2))
    init_latents_fft[w_mask] = w_key[w_mask].clone()
    init_latents = torch.fft.ifft2(torch.fft.ifftshift(init_latents_fft, dim=(-1, -2))).real
    # hot fix to prevent out-of-bounds values. will "properly" fix this later
    init_latents[init_latents == float("Inf")] = 4
    init_latents[init_latents == float("-Inf")] = -4

    return init_latents

def detect(image):
    # swap in the inverse scheduler
    curr_scheduler = pipe.scheduler
    pipe.scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)

    # ddim inversion
    img = transform_img(image).unsqueeze(0).to(pipe.unet.dtype).to(pipe.device)
    image_latents = pipe.vae.encode(img).latent_dist.mode() * 0.13025
    inverted_latents = pipe(prompt="", latents=image_latents, guidance_scale=1, num_inference_steps=25, output_type="latent")
    inverted_latents = inverted_latents.images

    # calculate a p-value instead of a detection threshold. more rigorous, plus we can do a non-boolean output
    inverted_latents_fft = torch.fft.fftshift(torch.fft.fft2(inverted_latents), dim=(-1, -2))[w_mask].flatten()
    target = w_key[w_mask].flatten()
    inverted_latents_fft = torch.concatenate([inverted_latents_fft.real, inverted_latents_fft.imag])
    target = torch.concatenate([target.real, target.imag])

    sigma = inverted_latents_fft.std()
    lamda = (target ** 2 / sigma ** 2).sum().item()
    x = (((inverted_latents_fft - target) / sigma) ** 2).sum().item()
    p_value = scipy.stats.ncx2.cdf(x=x, df=len(target), nc=lamda)

    # revert scheduler
    pipe.scheduler = curr_scheduler

    if p_value == 0:
        return 1.0
    else:
        return max(0.0, 1 - 1 / math.log(5 / p_value, 10))

def generate(prompt):
    return pipe(prompt=prompt, num_inference_steps=25, latents=get_noise()).images[0]

# actual gradio demo

def manager(input, progress=gr.Progress(track_tqdm=True)):  # single entry point, to prevent the queue from overloading
    if type(input) == str:
        return generate(input)
    elif type(input) == np.ndarray:
        image = Image.fromarray(input)
        percent = detect(image)
        return {"watermarked": percent, "not_watermarked": 1.0 - percent}

with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", secondary_hue="green", font=gr.themes.GoogleFont("Fira Sans"))) as app:
    with gr.Row():
        gr.HTML('<center><p>Bad actors are using generative AI to destroy the livelihoods of real artists. We need transparency now.</p><h1><span style="font-size:1.5em">Introducing Dendrokronos 🌳</span></h1></center>')
    with gr.Row():
        with gr.Column():
            gr.Markdown("# Generate\nType a prompt and hit Go. Dendrokronos will generate an invisibly-watermarked image. \nYou can click the download button to save the finished image. Try it with the detector.")
            with gr.Group():
                with gr.Row():
                    gen_in = gr.Textbox(max_lines=1, show_label=False, scale=4)
                    gen_btn = gr.Button("Go", variant="primary", scale=0)
                gen_out = gr.Image(interactive=False, show_label=False)
            gen_btn.click(fn=manager, inputs=gen_in, outputs=gen_out)
        with gr.Column():
            gr.Markdown("# Detect\nUpload an image and hit Detect. Dendrokronos will predict the probability it was watermarked. \nNote: Dendrokronos can only detect its own watermark. It won't detect other AIs, such as DALL-E.")
            det_out = gr.Label(show_label=False)
            with gr.Group():
                det_btn = gr.Button("Detect", variant="primary")
                det_in = gr.Image(interactive=True, sources=["upload", "clipboard"], show_label=False)
            det_btn.click(fn=manager, inputs=det_in, outputs=det_out)
    with gr.Row():
        gr.HTML('<center><h1> </h1>Acknowledgements: Dendrokronos uses <a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0">SDXL 1.0</a> for the underlying image generation and <a href="https://arxiv.org/abs/2305.20030">research by Yuxin Wen</a> for the watermark technology. Dendrokronos is a project by Devin Gulliver.</center>')

app.queue()
app.launch()
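For reference, the Fourier-space injection that get_noise() performs can be exercised on its own. Below is a minimal sketch (not part of the commit) with random tensors standing in for SDXL latents and an arbitrary complex key in place of the Space's actual w_key; it shows that taking .real after the inverse FFT means the embedded key survives only approximately.

# illustrative sketch: round trip of the ring-mask injection on random data
import numpy as np
import torch

def circle_mask(size=128, r=16):
    # same boolean disk as app.py's circle_mask, without the offsets
    x0 = y0 = size // 2
    y, x = np.ogrid[:size, :size]
    return ((x - x0) ** 2 + (y - y0) ** 2) <= r ** 2

latents = torch.randn(1, 4, 128, 128)   # stand-in for pipe.prepare_latents(...)
key = torch.fft.fftshift(torch.fft.fft2(torch.randn(1, 4, 128, 128)), dim=(-1, -2))
mask = torch.zeros(1, 4, 128, 128, dtype=torch.bool)
mask[:, 0] = torch.tensor(circle_mask())  # watermark a single channel, as app.py does

# inject: overwrite the masked (centered, low-frequency) coefficients with the key
fft = torch.fft.fftshift(torch.fft.fft2(latents), dim=(-1, -2))
fft[mask] = key[mask]
marked = torch.fft.ifft2(torch.fft.ifftshift(fft, dim=(-1, -2))).real  # .real discards data

# re-reading the spectrum recovers the key only approximately, which is why
# detect() scores the whole ring statistically instead of comparing exactly
recovered = torch.fft.fftshift(torch.fft.fft2(marked), dim=(-1, -2))
print((recovered[mask] - key[mask]).abs().mean().item())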
requirements.txt ADDED
@@ -0,0 +1,3 @@
torch
diffusers
accelerate
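Finally, the p-value logic in detect() can be sanity-checked in isolation. Under the no-watermark null hypothesis the inverted coefficients are modeled as independent Gaussians around zero, so the scaled squared distance to the key follows a noncentral chi-squared distribution; a watermarked image lands implausibly deep in that distribution's left tail. A rough sketch with synthetic numbers (df, the stand-in target, and the 0.3 distortion level are arbitrary choices, not values from the app):

# illustrative sketch: the noncentral chi-squared test on synthetic coefficients
import numpy as np
import scipy.stats

rng = np.random.default_rng(0)
df = 1000                        # number of masked real+imag coefficients
target = rng.normal(size=df)     # stand-in for w_key[w_mask]
sigma = 1.0

def p_value(obs):
    lam = (target ** 2 / sigma ** 2).sum()       # noncentrality, as in detect()
    x = (((obs - target) / sigma) ** 2).sum()    # squared distance to the key
    return scipy.stats.ncx2.cdf(x=x, df=df, nc=lam)

unwatermarked = rng.normal(size=df)                # noise unrelated to the key
watermarked = target + 0.3 * rng.normal(size=df)   # key survives, somewhat distorted

print(p_value(unwatermarked))  # roughly uniform draw: no evidence of the key
print(p_value(watermarked))    # ~0: far too close to the key to be chance

app.py then maps the p-value to the displayed score with max(0.0, 1 - 1/math.log(5/p_value, 10)), saturating at 1.0 when the p-value underflows to zero.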