import os
import sys

# Fetch the repositories pixray depends on (CLIP and taming-transformers),
# plus pixray itself, so they are available in the working directory.
os.system("git clone https://github.com/openai/CLIP")
os.system("git clone https://github.com/CompVis/taming-transformers.git")
os.system("git clone https://github.com/dribnet/pixray")

import gradio as gr
import torch

# Import pixray from the freshly cloned checkout.
sys.path.append("pixray")
import pixray


# Run pixray on a text prompt and return the generated image and progress video.
def generate(prompt, quality, aspect):
    torch.cuda.empty_cache()
    pixray.reset_settings()

    # A 'style' option is sketched here but not exposed in the UI:
    # use_pixeldraw = (style == 'pixel art')
    # use_clipdraw = (style == 'painting')
    pixray.add_settings(prompts=prompt,
                        aspect=aspect,
                        quality=quality,
                        make_video=True)

    settings = pixray.apply_settings()
    pixray.do_init(settings)
    pixray.do_run(settings)

    # pixray writes its outputs to these fixed paths in the working directory.
    return 'output.png', 'output.mp4'

# Create the UI components (Gradio 2.x-style input API)
prompt = gr.inputs.Textbox(default="Underwater city", label="Text Prompt")
quality = gr.inputs.Radio(choices=['draft', 'normal', 'better'], label="Quality")
# style = gr.inputs.Radio(choices=['image', 'painting', 'pixel art'], label="Type")
aspect = gr.inputs.Radio(choices=['square', 'widescreen', 'portrait'], label="Aspect")

# Launch the demo
iface = gr.Interface(generate,
                     inputs=[prompt, quality, aspect],
                     outputs=['image', 'video'],
                     enable_queue=True,
                     live=False)
iface.launch(debug=False)