Update app.py
app.py CHANGED
@@ -6,12 +6,10 @@ import tempfile
 import huggingface_hub
 import subprocess
 import threading
-from tqdm import tqdm
 
-def stream_output(pipe, progress_bar):
+def stream_output(pipe):
     for line in iter(pipe.readline, ''):
         print(line, end='')
-        progress_bar.update(1)
     pipe.close()
 
 HF_TKN = os.environ.get("GATED_HF_TOKEN")
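The simplified stream_output helper now takes only the pipe it drains. As a rough, self-contained sketch of how it behaves (the child command below is illustrative only and not part of this Space):

import subprocess
import threading

def stream_output(pipe):
    # Forward each line from the child's pipe to this process's stdout.
    for line in iter(pipe.readline, ''):
        print(line, end='')
    pipe.close()

# Illustration: stream the stdout of a trivial child process from a thread.
proc = subprocess.Popen(
    ['python', '-c', 'print("hello from the child")'],
    stdout=subprocess.PIPE, text=True, bufsize=1,
)
reader = threading.Thread(target=stream_output, args=(proc.stdout,))
reader.start()
proc.wait()
reader.join()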
@@ -80,7 +78,7 @@ def check_for_mp4_in_outputs():
     else:
         return None
 
-def infer(ref_video_in, ref_image_in, progress=gr.Progress(track_tqdm=True)):
+def infer(ref_image_in, ref_video_in):
     # check if 'outputs' dir exists and empty it if necessary
     check_outputs_folder('./outputs')
 
@@ -135,12 +133,9 @@ def infer(ref_video_in, ref_image_in, progress=gr.Progress(track_tqdm=True)):
     command = ['python', 'inference.py', '--inference_config', file_path]
     process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1)
 
-    # Create a tqdm progress bar
-    progress_bar = tqdm(total=100, desc="Inference Progress", unit="line", ncols=100, leave=True)
-
     # Create threads to handle stdout and stderr
-    stdout_thread = threading.Thread(target=stream_output, args=(process.stdout, progress_bar))
-    stderr_thread = threading.Thread(target=stream_output, args=(process.stderr, progress_bar))
+    stdout_thread = threading.Thread(target=stream_output, args=(process.stdout,))
+    stderr_thread = threading.Thread(target=stream_output, args=(process.stderr,))
 
 
     # Start the threads
@@ -148,12 +143,9 @@ def infer(ref_video_in, ref_image_in, progress=gr.Progress(track_tqdm=True)):
     stderr_thread.start()
 
     # Wait for the process to complete and the threads to finish
+    process.wait()
     stdout_thread.join()
     stderr_thread.join()
-    process.wait()
-
-    # Close the progress bar
-    progress_bar.close()
 
     print("Inference script finished with return code:", process.returncode)
 
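Taken together, the new control flow is: spawn the inference script, drain stdout and stderr on two reader threads, wait for the child to exit, then join the readers. A minimal sketch of that pattern, using a placeholder command since inference.py and its config file are specific to this Space:

import subprocess
import threading

def stream_output(pipe):
    # Drain a pipe line by line so the child never blocks on a full buffer.
    for line in iter(pipe.readline, ''):
        print(line, end='')
    pipe.close()

# Placeholder command; the Space runs ['python', 'inference.py', '--inference_config', file_path].
command = ['python', '-c', 'import sys; print("out"); print("err", file=sys.stderr)']
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                           text=True, bufsize=1)

stdout_thread = threading.Thread(target=stream_output, args=(process.stdout,))
stderr_thread = threading.Thread(target=stream_output, args=(process.stderr,))
stdout_thread.start()
stderr_thread.start()

process.wait()          # child exits, closing its ends of the pipes
stdout_thread.join()    # readers finish once they hit EOF
stderr_thread.join()

print("return code:", process.returncode)

Because the reader threads keep both pipes drained, calling process.wait() before join() does not risk a deadlock on a full pipe buffer.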
@@ -166,10 +158,19 @@ def infer(ref_video_in, ref_image_in, progress=gr.Progress(track_tqdm=True)):
 
     return mp4_file_path
 
-
-
-
-
-)
+with gr.Blocks() as demo:
+    with gr.Column():
+        with gr.Row():
+            with gr.Column():
+                with gr.Row():
+                    ref_image_in = gr.Image(type="filepath")
+                    ref_video_in = gr.Video()
+                submit_btn = gr.Button("Submit")
+            output_video = gr.Video()
+    submit_btn.click(
+        fn = infer,
+        inputs = [ref_image_in, ref_video_in],
+        outputs = [output_video]
+    )
 
 demo.launch()
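The Blocks layout added above boils down to two inputs, a button, and an output video wired to infer. A stripped-down sketch of the same wiring, with a stub in place of the real infer (which depends on the inference pipeline and is not reproduced here):

import gradio as gr

def infer(ref_image_in, ref_video_in):
    # Stub for illustration; the real infer runs inference.py and returns the output mp4 path.
    return ref_video_in

with gr.Blocks() as demo:
    with gr.Row():
        ref_image_in = gr.Image(type="filepath")
        ref_video_in = gr.Video()
    submit_btn = gr.Button("Submit")
    output_video = gr.Video()

    submit_btn.click(fn=infer, inputs=[ref_image_in, ref_video_in], outputs=[output_video])

demo.launch()

Note that the order of the inputs list matches the new infer signature, which is why the parameters were swapped to (ref_image_in, ref_video_in).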