import os

import gradio as gr
import torch
from huggingface_hub import hf_hub_download

from app_canny import create_demo as create_demo_canny
from app_depth import create_demo as create_demo_depth
from model import Model

# Log the installed PyTorch version to make the Space environment easier to debug.
print("Torch version:", torch.__version__)

# Download all required weights into ./checkpoints.
ckpt_folder = "./checkpoints"

# Flan-T5-XL text encoder: config, sharded PyTorch weights, and tokenizer files.
t5_folder = os.path.join(ckpt_folder, "flan-t5-xl/flan-t5-xl")
hf_hub_download(repo_id="google/flan-t5-xl", filename="config.json", local_dir=t5_folder)
hf_hub_download(repo_id="google/flan-t5-xl", filename="pytorch_model-00001-of-00002.bin", local_dir=t5_folder)
hf_hub_download(repo_id="google/flan-t5-xl", filename="pytorch_model-00002-of-00002.bin", local_dir=t5_folder)
hf_hub_download(repo_id="google/flan-t5-xl", filename="pytorch_model.bin.index.json", local_dir=t5_folder)
hf_hub_download(repo_id="google/flan-t5-xl", filename="special_tokens_map.json", local_dir=t5_folder)
hf_hub_download(repo_id="google/flan-t5-xl", filename="spiece.model", local_dir=t5_folder)
hf_hub_download(repo_id="google/flan-t5-xl", filename="tokenizer_config.json", local_dir=t5_folder)

# MiDaS (DPT-Hybrid) depth estimation checkpoint used for the depth condition.
hf_hub_download(repo_id="lllyasviel/Annotators", filename="dpt_hybrid-midas-501f0c75.pt", local_dir=ckpt_folder)

# ControlAR generator checkpoints for the canny-edge and depth conditions.
hf_hub_download(repo_id="wondervictor/ControlAR", filename="canny_MR.safetensors", local_dir=ckpt_folder)
hf_hub_download(repo_id="wondervictor/ControlAR", filename="depth_MR.safetensors", local_dir=ckpt_folder)

DESCRIPTION = (
    "# [ControlAR: Controllable Image Generation with Autoregressive Models](https://arxiv.org/abs/2410.02705) \n"
    "### The first row of the outputs shows the input image and the condition; the second row shows the images generated by ControlAR. \n"
    "### You can run the demo locally by following the instructions in our [GitHub repo](https://github.com/hustvl/ControlAR)."
)
SHOW_DUPLICATE_BUTTON = os.getenv("SHOW_DUPLICATE_BUTTON") == "1"

# Instantiate ControlAR and move it to the GPU; fall back to CPU if CUDA is
# unavailable (generation will be very slow without a GPU).
model = Model()
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# Build the Gradio UI: one tab per control condition.
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",
        visible=SHOW_DUPLICATE_BUTTON,
    )
    with gr.Tabs():
        with gr.TabItem("Depth"):
            create_demo_depth(model.process_depth)
        with gr.TabItem("Canny"):
            create_demo_canny(model.process_canny)

if __name__ == "__main__":
    demo.launch(share=False)
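# Note: demo.launch() also accepts server_name="0.0.0.0" and server_port for running
# behind Docker or on a LAN; share=True would create a temporary public Gradio link instead.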