"""Gradio demo: turn a text prompt into an image + video with pixray.

On first run this clones the pixray toolchain (CLIP, taming-transformers,
pixray) into the working directory, then serves a small web UI whose
single action runs one pixray generation pass.
"""

import os
import sys

import gradio as gr
import torch


def _clone_once(repo_url):
    """git-clone *repo_url* into the CWD unless the checkout already exists.

    The original unconditional clone exits non-zero (with noisy stderr) on
    every run after the first because the target directory already exists;
    guarding on the directory makes re-runs clean and idempotent.
    """
    target = repo_url.rstrip("/").split("/")[-1]
    if target.endswith(".git"):
        target = target[: -len(".git")]
    if not os.path.isdir(target):
        os.system("git clone " + repo_url)


# pixray pulls in CLIP and taming-transformers at runtime, so all three
# checkouts must exist before `import pixray` below.
_clone_once("https://github.com/openai/CLIP")
_clone_once("https://github.com/CompVis/taming-transformers.git")
_clone_once("https://github.com/dribnet/pixray")

sys.path.append("pixray")
import pixray  # noqa: E402  (must follow the clone + sys.path setup above)


def generate(prompt, quality, aspect):
    """Run one pixray generation pass and return the output artifacts.

    Parameters
    ----------
    prompt : str
        Text prompt describing the desired image.
    quality : str
        Pixray quality preset; the UI offers 'draft' / 'normal' / 'better'.
    aspect : str
        Aspect preset; the UI offers 'square' / 'widescreen' / 'portrait'.

    Returns
    -------
    tuple[str, str]
        Paths to the files pixray writes: ('output.png', 'output.mp4').
    """
    # Release any VRAM still held by a previous run before starting a new one.
    torch.cuda.empty_cache()
    pixray.reset_settings()
    pixray.add_settings(
        prompts=prompt,
        aspect=aspect,
        quality=quality,
        make_video=True,
    )
    settings = pixray.apply_settings()
    pixray.do_init(settings)
    pixray.do_run(settings)
    return "output.png", "output.mp4"


# --- UI definition (gradio 2.x `gr.inputs` API) ---------------------------
prompt = gr.inputs.Textbox(default="Underwater city", label="Text Prompt")
quality = gr.inputs.Radio(choices=["draft", "normal", "better"], label="Quality")
aspect = gr.inputs.Radio(
    choices=["square", "widescreen", "portrait"], label="Size"
)

# Launched at module level (not under `if __name__ == "__main__"`) on
# purpose: hosted runners that import this file still start the app,
# matching the original behavior.
iface = gr.Interface(
    generate,
    inputs=[prompt, quality, aspect],
    outputs=["image", "video"],
    enable_queue=True,
    live=False,
)
iface.launch(debug=False)