```bash
# PyTorch with CUDA 11.1 wheels, plus matching torchtext/torchvision
pip install torch==1.9.0+cu111 torchtext==0.10.0 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch/ -f https://download.pytorch.org/whl/torchvision/

# Clone the model repos: CLIP, taming-transformers, and pixray itself
git clone https://github.com/openai/CLIP
# pip install taming-transformers
git clone https://github.com/CompVis/taming-transformers.git
rm -Rf pixray
git clone https://github.com/dribnet/pixray

# Python dependencies used by pixray
pip install ftfy regex tqdm omegaconf pytorch-lightning
pip install kornia==0.6.1
pip install imageio-ffmpeg
pip install einops
pip install torch-optimizer
pip install easydict
pip install braceexpand
pip install git+https://github.com/pvigier/perlin-numpy

# Working directories and example images
mkdir steps
mkdir models
wget https://user-images.githubusercontent.com/945979/126260797-adc60317-9518-40de-8700-b1f93e81e0ec.png -O this_is_fine.png
wget https://user-images.githubusercontent.com/945979/126415385-d70ff2b0-f021-4238-9621-6180d33b242c.jpg -O perfume.jpg

# Gradio for the web UI
pip install gradio==2.5.1
```
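The setup above installs a CUDA 11.1 build of PyTorch and clones three repositories into the working directory. A quick sanity check (a minimal sketch; the directory names are simply the ones cloned and created above) can confirm the GPU build of torch is usable and that the clones landed where the app expects them:

```python
# Minimal environment sanity check (assumes the installs/clones above ran
# from the current working directory).
import os
import torch

print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())

# Repos cloned by the setup script
for repo in ("CLIP", "taming-transformers", "pixray"):
    print(f"{repo}/: {'found' if os.path.isdir(repo) else 'MISSING'}")

# Working directories created by the setup script
for d in ("steps", "models"):
    print(f"{d}/: {'found' if os.path.isdir(d) else 'MISSING'}")
```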
```python
import sys
sys.path.append("pixray")  # make the cloned pixray repo importable

import gradio as gr
import torch

import pixray

# Main generation function: text prompt in, rendered image and progress video out
def generate(prompt, quality, aspect):
    torch.cuda.empty_cache()
    pixray.reset_settings()
    # use_pixeldraw = (style == 'pixel art')
    # use_clipdraw = (style == 'painting')
    pixray.add_settings(prompts=prompt,
                        aspect=aspect,
                        quality=quality,
                        make_video=True)
    settings = pixray.apply_settings()
    pixray.do_init(settings)
    pixray.do_run(settings)
    return 'output.png', 'output.mp4'

# Create the UI components
prompt = gr.inputs.Textbox(default="Underwater city", label="Text Prompt")
quality = gr.inputs.Radio(choices=['draft', 'normal', 'better'], label="Quality")
# style = gr.inputs.Radio(choices=['image', 'painting', 'pixel art'], label="Type")
aspect = gr.inputs.Radio(choices=['square', 'widescreen', 'portrait'], label="Size")

# Launch the demo
iface = gr.Interface(generate, inputs=[prompt, quality, aspect], outputs=['image', 'video'], enable_queue=True, live=False)
iface.launch(debug=True)
```
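Because `generate()` just returns the file paths (`output.png` and `output.mp4`) that Gradio then displays, the pipeline can also be exercised without launching the UI. A minimal sketch, assuming the app code above has already run and using argument values taken from the Radio choices it defines:

```python
# Direct call to the app's generate() function, bypassing the Gradio UI.
# Argument values mirror the Radio choices above; 'draft' keeps the run short.
image_path, video_path = generate("Underwater city", "draft", "widescreen")
print("image:", image_path, "| video:", video_path)
```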