xiaozhongji committed on
Commit
836ed8b
·
1 Parent(s): fff431e
Files changed (2) hide show
  1. app.py +0 -2
  2. sonic.py +3 -1
app.py CHANGED
@@ -1,4 +1,3 @@
1
- import spaces
2
  import gradio as gr
3
  import os
4
  import numpy as np
@@ -22,7 +21,6 @@ def get_md5(content):
22
  md5 = md5hash.hexdigest()
23
  return md5
24
 
25
- @spaces.GPU(duration=120)
26
  def get_video_res(img_path, audio_path, res_video_path, dynamic_scale=1.0):
27
 
28
  expand_ratio = 0.5
 
 
1
  import gradio as gr
2
  import os
3
  import numpy as np
 
21
  md5 = md5hash.hexdigest()
22
  return md5
23
 
 
24
  def get_video_res(img_path, audio_path, res_video_path, dynamic_scale=1.0):
25
 
26
  expand_ratio = 0.5
sonic.py CHANGED
@@ -19,10 +19,12 @@ from src.models.audio_adapter.audio_proj import AudioProjModel
19
  from src.models.audio_adapter.audio_to_bucket import Audio2bucketModel
20
  from src.utils.RIFE.RIFE_HDv3 import RIFEModel
21
  from src.dataset.face_align.align import AlignImage
 
22
 
23
 
24
  BASE_DIR = os.path.dirname(os.path.abspath(__file__))
25
 
 
26
  def test(
27
  pipe,
28
  config,
@@ -254,7 +256,7 @@ class Sonic():
254
  crop_image = face_image[crop_bbox[1]:crop_bbox[3], crop_bbox[0]:crop_bbox[2]]
255
  cv2.imwrite(output_image_path, crop_image)
256
 
257
- @torch.no_grad()
258
  def process(self,
259
  image_path,
260
  audio_path,
 
19
  from src.models.audio_adapter.audio_to_bucket import Audio2bucketModel
20
  from src.utils.RIFE.RIFE_HDv3 import RIFEModel
21
  from src.dataset.face_align.align import AlignImage
22
+ import spaces
23
 
24
 
25
  BASE_DIR = os.path.dirname(os.path.abspath(__file__))
26
 
27
+ @torch.no_grad()
28
  def test(
29
  pipe,
30
  config,
 
256
  crop_image = face_image[crop_bbox[1]:crop_bbox[3], crop_bbox[0]:crop_bbox[2]]
257
  cv2.imwrite(output_image_path, crop_image)
258
 
259
+ @spaces.GPU(duration=120)
260
  def process(self,
261
  image_path,
262
  audio_path,