IceClear committed
Commit · 17caf25
1 Parent(s): 7a45a2b
update
app.py CHANGED
@@ -11,7 +11,7 @@
 # // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # // See the License for the specific language governing permissions and
 # // limitations under the License.
-
+import spaces
 
 import os
 import torch
@@ -128,6 +128,7 @@ def configure_sequence_parallel(sp_size):
     if sp_size > 1:
         init_sequence_parallel(sp_size)
 
+@spaces.GPU(duration=120)
 def configure_runner(sp_size):
     config_path = os.path.join('./configs_3b', 'main.yaml')
     config = load_config(config_path)
@@ -143,6 +144,7 @@ def configure_runner(sp_size):
     runner.vae.set_memory_limit(**runner.config.vae.memory_limit)
     return runner
 
+@spaces.GPU(duration=120)
 def generation_step(runner, text_embeds_dict, cond_latents):
     def _move_to_cuda(x):
         return [i.to(get_device()) for i in x]
@@ -199,7 +201,7 @@ def generation_step(runner, text_embeds_dict, cond_latents):
 
     return samples
 
-@GPU
+@spaces.GPU(duration=120)
 def generation_loop(video_path='./test_videos', output_dir='./results', seed=666, batch_size=1, cfg_scale=1.0, cfg_rescale=0.0, sample_steps=1, res_h=1280, res_w=720, sp_size=1):
     runner = configure_runner(1)
     output_dir = 'output/out.mp4'
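
For context, the change follows the standard Hugging Face ZeroGPU pattern: `import spaces` at module level, then decorate each GPU-bound entry point with `@spaces.GPU(duration=...)` so a GPU is attached only while that call runs. A minimal, self-contained sketch of the pattern (the function name and body below are illustrative, not taken from app.py):

    # Illustrative ZeroGPU sketch; only the decorator usage mirrors this commit.
    import spaces  # Hugging Face ZeroGPU helper, available on Spaces
    import torch

    @spaces.GPU(duration=120)  # request a GPU for up to 120 seconds per call
    def run_on_gpu(x: torch.Tensor) -> torch.Tensor:
        # CUDA is only guaranteed inside functions decorated with @spaces.GPU
        return (x.to("cuda") * 2).cpu()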
|