llmlocal committed
Commit 9c1c91e · Parent(s): 6ef652d

update config to support gradio 5

Files changed (2):
  1. app.py +68 -80
  2. requirements.txt +6 -4
app.py CHANGED
@@ -1,7 +1,6 @@
 import gradio as gr
 import torch
 import os
-import spaces
 import uuid
 
 from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
@@ -11,11 +10,6 @@ from safetensors.torch import load_file
 from PIL import Image
 from transformers import CLIPFeatureExtractor
 
-# Custom message for linking to other demos
-MORE_INFO = """ ## Related Demos
-### Check out other interesting demos here
-"""
-
 # Model configuration
 BASES = {
     "Cartoon": "frankjoshua/toonyou_beta6",
@@ -53,7 +47,6 @@ pipe.scheduler = EulerDiscreteScheduler.from_config(
 feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
 
 # Main generation function
-@spaces.GPU(duration=30, queue=False)
 def generate_image(prompt, base="Realistic", motion="", step=8, progress=gr.Progress()):
     global step_loaded
     global base_loaded
@@ -90,17 +83,13 @@ def generate_image(prompt, base="Realistic", motion="", step=8, progress=gr.Prog
     motion_loaded = motion
 
     # Progress tracking
-    progress((0, step))
-    def progress_callback(i, t, z):
-        progress((i+1, step))
+    progress(0, desc="Starting generation...")
 
     # Generate the video
     output = pipe(
         prompt=prompt,
         guidance_scale=1.2,
-        num_inference_steps=step,
-        callback=progress_callback,
-        callback_steps=1
+        num_inference_steps=step
     )
 
     # Save and return the video
@@ -110,81 +99,80 @@ def generate_image(prompt, base="Realistic", motion="", step=8, progress=gr.Prog
     return path
 
 # Gradio Interface
-with gr.Blocks(css="style.css") as demo:
-    # Header
-    gr.HTML(
+with gr.Blocks(theme=gr.themes.Default()) as demo:
+    gr.Markdown(
         """
-        <h1><center>Instant⚡Video Generator</center></h1>
-        <p><center><span class='warning'>Adjust steps from 4 to 8 for better results if needed.</span></center></p>
-        <p><center><strong>First generation takes longer, subsequent generations are faster.</strong></p>
-        <p><center>Follow the example prompts format for best results</p>
+        # Instant⚡Video Generator
+
+        > **Note**: Adjust steps from 4 to 8 for better results if needed.
+
+        First generation takes longer, subsequent generations are faster.
+        Follow the example prompts format for best results.
         """
     )
 
-    # Input interface
-    with gr.Group():
-        with gr.Row():
-            prompt = gr.Textbox(label='Prompt')
-
-        with gr.Row():
-            select_base = gr.Dropdown(
-                label='Base model',
-                choices=list(BASES.keys()),
-                value=base_loaded,
-                interactive=True
-            )
-
-            select_motion = gr.Dropdown(
-                label='Motion',
-                choices=[
-                    ("Default", ""),
-                    ("Zoom in", "guoyww/animatediff-motion-lora-zoom-in"),
-                    ("Zoom out", "guoyww/animatediff-motion-lora-zoom-out"),
-                    ("Tilt up", "guoyww/animatediff-motion-lora-tilt-up"),
-                    ("Tilt down", "guoyww/animatediff-motion-lora-tilt-down"),
-                    ("Pan left", "guoyww/animatediff-motion-lora-pan-left"),
-                    ("Pan right", "guoyww/animatediff-motion-lora-pan-right"),
-                    ("Roll left", "guoyww/animatediff-motion-lora-rolling-anticlockwise"),
-                    ("Roll right", "guoyww/animatediff-motion-lora-rolling-clockwise"),
-                ],
-                value="guoyww/animatediff-motion-lora-zoom-in",
-                interactive=True
+    with gr.Row():
+        with gr.Column(scale=4):
+            prompt = gr.Textbox(
+                label="Enter your prompt",
+                placeholder="Focus: Describe what you want to animate..."
             )
 
-            select_step = gr.Dropdown(
-                label='Inference steps',
-                choices=[
-                    ('1-Step', 1),
-                    ('2-Step', 2),
-                    ('4-Step', 4),
-                    ('8-Step', 8),
-                ],
-                value=4,
-                interactive=True
-            )
-
-            submit = gr.Button(
-                scale=1,
-                variant='primary'
+            with gr.Row():
+                select_base = gr.Dropdown(
+                    label='Base model',
+                    choices=list(BASES.keys()),
+                    value=base_loaded
+                )
+
+                select_motion = gr.Dropdown(
+                    label='Motion Style',
+                    choices=[
+                        ("Default", ""),
+                        ("Zoom in", "guoyww/animatediff-motion-lora-zoom-in"),
+                        ("Zoom out", "guoyww/animatediff-motion-lora-zoom-out"),
+                        ("Tilt up", "guoyww/animatediff-motion-lora-tilt-up"),
+                        ("Tilt down", "guoyww/animatediff-motion-lora-tilt-down"),
+                        ("Pan left", "guoyww/animatediff-motion-lora-pan-left"),
+                        ("Pan right", "guoyww/animatediff-motion-lora-pan-right"),
+                        ("Roll left", "guoyww/animatediff-motion-lora-rolling-anticlockwise"),
+                        ("Roll right", "guoyww/animatediff-motion-lora-rolling-clockwise"),
+                    ],
+                    value=""
+                )
+
+                select_step = gr.Dropdown(
+                    label='Quality Steps',
+                    choices=[
+                        ('1-Step (Fastest)', 1),
+                        ('2-Step (Fast)', 2),
+                        ('4-Step (Balanced)', 4),
+                        ('8-Step (Best)', 8),
+                    ],
+                    value=4
+                )
+
+            generate_btn = gr.Button("Generate Video", variant="primary")
+
+        with gr.Column(scale=6):
+            video_output = gr.Video(
+                label="Generated Animation",
+                height=512,
+                width=512,
+                autoplay=True
            )
 
-    # Output video display
-    video = gr.Video(
-        label='Generated Animation',
-        autoplay=True,
-        height=512,
-        width=512,
-        elem_id="video_output"
+    # Event handlers
+    generate_btn.click(
+        fn=generate_image,
+        inputs=[prompt, select_base, select_motion, select_step],
+        outputs=video_output
     )
 
-    # Event handlers
-    gr.on(
-        triggers=[submit.click, prompt.submit],
+    prompt.submit(
         fn=generate_image,
         inputs=[prompt, select_base, select_motion, select_step],
-        outputs=[video],
-        api_name="instant_video",
-        queue=False
+        outputs=video_output
     )
 
     # Example prompts
@@ -199,11 +187,11 @@ with gr.Blocks(css="style.css") as demo:
             ["Focus: Kids Playing (Season: Winter)"],
             ["Focus: Cars in Street (Season: Rain, Daytime) (Shot from Distance) (Movement: Cars running)"]
         ],
+        inputs=prompt,
+        outputs=video_output,
         fn=generate_image,
-        inputs=[prompt],
-        outputs=[video],
-        cache_examples="lazy",
+        cache_examples=True,
    )
 
 # Launch the interface
-demo.queue().launch()
+demo.launch()
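
The progress rewrite above drops the tuple-style progress((i+1, step)) updates, along with the diffusers callback/callback_steps arguments that fed them, in favor of a single call on the gr.Progress object. A minimal sketch of that Gradio 5 pattern, with a stand-in fake_job function (not part of this Space) in place of the real pipeline:

import time
import gradio as gr

# Sketch of Gradio 5 progress reporting as used in the updated generate_image.
# `fake_job` and its sleep loop are illustrative stand-ins only.
def fake_job(steps, progress=gr.Progress()):
    progress(0, desc="Starting generation...")  # fraction in [0, 1] plus a label
    for i in range(int(steps)):
        time.sleep(0.1)  # stand-in for one denoising step
        progress((i + 1) / steps, desc=f"Step {i + 1}/{int(steps)}")
    return f"Done in {int(steps)} steps"

with gr.Blocks() as demo:
    steps = gr.Slider(1, 8, value=4, step=1, label="Steps")
    result = gr.Textbox(label="Result")
    gr.Button("Run").click(fn=fake_job, inputs=steps, outputs=result)

demo.launch()

Progress events run through the queue, which is enabled by default in recent Gradio releases; that is presumably why the explicit demo.queue() call could be dropped from the launch line.
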
requirements.txt CHANGED
@@ -1,8 +1,10 @@
-gradio==4.16.0
-torch>=2.0.0
+--extra-index-url https://download.pytorch.org/whl/cu118
+gradio==5.16.2
+torch==2.0.1+cu118
+torchvision==0.15.2+cu118
 diffusers==0.25.0
 transformers==4.36.2
+accelerate==0.25.0
 safetensors==0.4.1
 huggingface_hub==0.20.3
-Pillow==10.2.0
-spaces==0.19.3
+Pillow==10.2.0
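
The new --extra-index-url line lets a plain pip install -r requirements.txt resolve the +cu118 torch and torchvision wheels from the PyTorch index. A quick post-install sanity check (a sketch, not part of the repository):

# Sanity check for the pinned cu118 wheels; not part of the repo.
import torch

print(torch.__version__)          # expected: 2.0.1+cu118
print(torch.cuda.is_available())  # True when a compatible NVIDIA driver is present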