Commit · 9a6c912 · 0 Parent(s)
Duplicate from diffusers/sd-to-diffusers
- .gitattributes +34 -0
- README.md +14 -0
- app.py +36 -0
- convert.py +95 -0
- hf_utils.py +50 -0
- requirements.txt +7 -0
- utils.py +6 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,14 @@
---
title: SD To Diffusers
emoji: 🎨➡️🧨
colorFrom: indigo
colorTo: blue
sdk: gradio
sdk_version: 3.31.0
app_file: app.py
pinned: true
license: mit
duplicated_from: diffusers/sd-to-diffusers
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,36 @@
import gradio as gr

from convert import convert

DESCRIPTION = """
The steps are the following:

- Paste a read-access token from hf.co/settings/tokens. Read access is enough, given that we will open a PR against the source repo.
- Input a model id from the Hub
- Input the filename from the root dir of the repo that you would like to convert, e.g. 'v2-1_768-ema-pruned.ckpt' or 'v1-5-pruned.safetensors'
- Choose which Stable Diffusion version, image size, and scheduler type the model has, and whether you want the "ema" or "non-ema" weights.
- Click "Submit"
- That's it! You'll get feedback on whether it worked, and if it did, you'll get the URL of the opened PR 🔥

⚠️ If you encounter weird error messages, please have a look at the Logs and feel free to open a PR to correct the error messages.
"""

demo = gr.Interface(
    title="Convert any Stable Diffusion checkpoint to Diffusers and open a PR",
    description=DESCRIPTION,
    allow_flagging="never",
    article="Check out the [Diffusers repo on GitHub](https://github.com/huggingface/diffusers)",
    inputs=[
        gr.Text(max_lines=1, label="your_hf_token"),
        gr.Text(max_lines=1, label="model_id"),
        gr.Text(max_lines=1, label="filename"),
        gr.Radio(label="Model type", choices=["v1", "v2", "ControlNet"]),
        gr.Radio(label="Sample size (px)", choices=[512, 768]),
        gr.Radio(label="Scheduler type", choices=["pndm", "heun", "euler", "dpm", "ddim"], value="dpm"),
        gr.Radio(label="Extract EMA or non-EMA?", choices=["ema", "non-ema"], value="ema"),
    ],
    outputs=[gr.Markdown(label="output")],
    fn=convert,
).queue(max_size=10, concurrency_count=1)

demo.launch(show_api=True)
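
Since the app is launched with show_api=True, the Space can also be driven programmatically. A hedged sketch using the gradio_client package, assuming the default "/predict" endpoint that gr.Interface registers; the token, repo id, and filename below are placeholders, and the positional arguments mirror the inputs list above:

from gradio_client import Client

client = Client("diffusers/sd-to-diffusers")
result = client.predict(
    "hf_...",                           # your_hf_token (read access is enough)
    "runwayml/stable-diffusion-v1-5",   # model_id (placeholder)
    "v1-5-pruned-emaonly.safetensors",  # filename (placeholder)
    "v1",                               # Model type
    512,                                # Sample size (px)
    "dpm",                              # Scheduler type
    "ema",                              # Extract EMA or non-EMA?
    api_name="/predict",
)
print(result)  # markdown with the URL of the opened PR on success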
convert.py
ADDED
@@ -0,0 +1,95 @@
import gradio as gr
import requests
import os
import shutil
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Optional

import torch
from io import BytesIO

from huggingface_hub import CommitInfo, Discussion, HfApi, hf_hub_download
from huggingface_hub.file_download import repo_folder_name
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_from_original_stable_diffusion_ckpt, download_controlnet_from_original_ckpt
)
from transformers import CONFIG_MAPPING


COMMIT_MESSAGE = " This PR adds fp32 and fp16 weights in PyTorch and safetensors format to {}"


def convert_single(model_id: str, filename: str, model_type: str, sample_size: int, scheduler_type: str, extract_ema: bool, folder: str, progress):
    from_safetensors = filename.endswith(".safetensors")

    progress(0, desc="Downloading model")
    # Prefer a local file at <model_id>/<filename> if present; otherwise pull from the Hub.
    local_file = os.path.join(model_id, filename)
    ckpt_file = local_file if os.path.isfile(local_file) else hf_hub_download(repo_id=model_id, filename=filename)

    if model_type == "v1":
        config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    elif model_type == "v2":
        if sample_size == 512:
            config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference.yaml"
        else:
            # 768px v2 checkpoints use the v-prediction config.
            config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml"
    elif model_type == "ControlNet":
        # ControlNet repos are expected to ship a YAML config next to the checkpoint, with the same stem.
        config_url = (Path(model_id) / "resolve/main" / filename).with_suffix(".yaml")
        config_url = "https://huggingface.co/" + str(config_url)

    config_file = BytesIO(requests.get(config_url).content)

    if model_type == "ControlNet":
        progress(0.2, desc="Converting ControlNet Model")
        pipeline = download_controlnet_from_original_ckpt(ckpt_file, config_file, image_size=sample_size, from_safetensors=from_safetensors, extract_ema=extract_ema)
        to_args = {"dtype": torch.float16}
    else:
        progress(0.1, desc="Converting Model")
        pipeline = download_from_original_stable_diffusion_ckpt(ckpt_file, config_file, image_size=sample_size, scheduler_type=scheduler_type, from_safetensors=from_safetensors, extract_ema=extract_ema)
        to_args = {"torch_dtype": torch.float16}

    # Save fp32 weights in both PyTorch and safetensors formats ...
    pipeline.save_pretrained(folder)
    pipeline.save_pretrained(folder, safe_serialization=True)

    # ... then cast to fp16 and save both formats again as the "fp16" variant.
    pipeline = pipeline.to(**to_args)
    pipeline.save_pretrained(folder, variant="fp16")
    pipeline.save_pretrained(folder, safe_serialization=True, variant="fp16")

    return folder


def previous_pr(api: "HfApi", model_id: str, pr_title: str) -> Optional["Discussion"]:
    try:
        discussions = api.get_repo_discussions(repo_id=model_id)
    except Exception:
        return None
    for discussion in discussions:
        if discussion.status == "open" and discussion.is_pull_request and discussion.title == pr_title:
            details = api.get_discussion_details(repo_id=model_id, discussion_num=discussion.num)
            if details.target_branch == "refs/heads/main":
                return discussion


def convert(token: str, model_id: str, filename: str, model_type: str, sample_size: int = 512, scheduler_type: str = "pndm", extract_ema: bool = True, progress=gr.Progress()):
    api = HfApi()

    pr_title = "Adding `diffusers` weights of this model"

    # The "Extract EMA or non-EMA?" Radio passes the strings "ema" / "non-ema"; coerce
    # to the boolean the converters expect (otherwise "non-ema" would be truthy).
    if isinstance(extract_ema, str):
        extract_ema = extract_ema == "ema"

    with TemporaryDirectory() as d:
        folder = os.path.join(d, repo_folder_name(repo_id=model_id, repo_type="models"))
        os.makedirs(folder)
        new_pr = None
        try:
            folder = convert_single(model_id, filename, model_type, sample_size, scheduler_type, extract_ema, folder, progress)
            progress(0.7, desc="Uploading to Hub")
            new_pr = api.upload_folder(folder_path=folder, path_in_repo="./", repo_id=model_id, repo_type="model", token=token, commit_message=pr_title, commit_description=COMMIT_MESSAGE.format(model_id), create_pr=True)
            pr_number = new_pr.split("%2F")[-1].split("/")[0]
            link = f"PR created at: {'https://huggingface.co/' + os.path.join(model_id, 'discussions', pr_number)}"
            progress(1, desc="Done")
        except Exception as e:
            raise gr.exceptions.Error(str(e))
        finally:
            shutil.rmtree(folder)

        return link
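
The conversion core can also be exercised without the UI. A minimal local sketch, assuming convert.py is importable and you have network access to the Hub; the repo id and filename are placeholders for any repo hosting an original-format checkpoint. No PR is opened here, the Diffusers weights are just written to a temporary folder:

from tempfile import TemporaryDirectory

from convert import convert_single

def print_progress(value, desc=""):
    # Stand-in for gr.Progress(): convert_single only calls progress(value, desc=...).
    print(f"{value:.0%} {desc}")

with TemporaryDirectory() as out:
    convert_single(
        model_id="runwayml/stable-diffusion-v1-5",   # placeholder repo
        filename="v1-5-pruned-emaonly.safetensors",  # placeholder checkpoint
        model_type="v1",
        sample_size=512,
        scheduler_type="pndm",
        extract_ema=True,
        folder=out,
        progress=print_progress,
    )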
hf_utils.py
ADDED
@@ -0,0 +1,50 @@
from huggingface_hub import get_hf_file_metadata, hf_hub_url, hf_hub_download, scan_cache_dir, whoami, list_models


def get_my_model_names(token):

    try:
        author = whoami(token=token)
        model_infos = list_models(author=author["name"], use_auth_token=token)
        return [model.modelId for model in model_infos], None

    except Exception as e:
        return [], e

def download_file(repo_id: str, filename: str, token: str):
    """Download a file from a repo on the Hugging Face Hub.

    Returns:
        file_path (:obj:`str`): The path to the downloaded file.
        revision (:obj:`str`): The commit hash of the file.
    """

    md = get_hf_file_metadata(hf_hub_url(repo_id=repo_id, filename=filename), token=token)
    revision = md.commit_hash

    file_path = hf_hub_download(repo_id=repo_id, filename=filename, revision=revision, token=token)

    return file_path, revision

def delete_file(revision: str):
    """Delete a file from local cache.

    Args:
        revision (:obj:`str`): The commit hash of the file.
    Returns:
        None
    """
    scan_cache_dir().delete_revisions(revision).execute()

def get_pr_url(api, repo_id, title):
    try:
        discussions = api.get_repo_discussions(repo_id=repo_id)
    except Exception:
        return None
    for discussion in discussions:
        if (
            discussion.status == "open"
            and discussion.is_pull_request
            and discussion.title == title
        ):
            return f"https://huggingface.co/{repo_id}/discussions/{discussion.num}"
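
A hedged usage sketch for the two cache helpers above; the repo id, filename, and token are placeholders:

from hf_utils import download_file, delete_file

# Download a file pinned to the exact commit it was resolved from ...
path, revision = download_file(
    repo_id="runwayml/stable-diffusion-v1-5",  # placeholder
    filename="v1-5-pruned-emaonly.ckpt",       # placeholder
    token="hf_...",                            # your read token
)
print(path, revision)

# ... and evict that cached revision once you are done with it.
delete_file(revision)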
requirements.txt
ADDED
@@ -0,0 +1,7 @@
huggingface_hub
safetensors
transformers
accelerate
git+https://github.com/huggingface/diffusers
omegaconf
pytorch_lightning
utils.py
ADDED
@@ -0,0 +1,6 @@
def is_google_colab():
    try:
        import google.colab  # noqa: F401 (only probing for availability)
        return True
    except ImportError:  # bare "except" narrowed to the error an import raises
        return False
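
For completeness, a hypothetical call site (not taken from this Space) showing how the probe might be used:

from utils import is_google_colab

# e.g. pick an output directory that fits the runtime (hypothetical paths).
output_dir = "/content/converted" if is_google_colab() else "./converted"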