Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -14,7 +14,6 @@ import umap
 import pandas as pd
 import matplotlib
 import matplotlib.pyplot as plt
-from matplotlib.ticker import MaxNLocator
 from moviepy.editor import VideoFileClip
 from PIL import Image
 import gradio as gr
@@ -22,7 +21,6 @@ import tempfile
 import shutil
 
 
-
 # Suppress TensorFlow warnings
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
 import tensorflow as tf
@@ -119,7 +117,7 @@ def extract_frames(video_path, output_folder, desired_fps, progress_callback=Non
 
         # Report progress
         if progress_callback:
-            progress = min(100, (frame_count / total_frames_to_extract) * 100)
+            progress = min(100, (frame_count / total_frames_to_extract) * 100)
             progress_callback(progress, f"Extracting frame")
 
         if frame_count >= total_frames_to_extract:
@@ -485,9 +483,6 @@ def process_video(video_path, num_anomalies, num_components, desired_fps, batch_
     except Exception as e:
         return f"Error generating plots: {str(e)}", None, None, None, None, None, None, None, None, None
 
-    # Get a random face sample
-    face_sample = get_random_face_sample(organized_faces_folder, largest_cluster, output_folder)
-
     progress(1.0, "Preparing results")
     results = f"Top {num_anomalies} anomalies (All Features):\n"
     results += "\n".join([f"{score:.4f} at {timecode}" for score, timecode in
@@ -501,17 +496,19 @@ def process_video(video_path, num_anomalies, num_components, desired_fps, batch_
         results += f"\n\nTop {num_anomalies} {emotion.capitalize()} Scores:\n"
         results += "\n".join([f"{df[emotion].iloc[i]:.4f} at {df['Timecode'].iloc[i]}" for i in top_indices])
 
+    # Get a random face sample
+    face_sample = get_random_face_sample(organized_faces_folder, largest_cluster, output_folder)
+
     return (
-        results,
-        face_sample,  # Random face sample image
+        results,
         anomaly_plot_all,
         anomaly_plot_comp,
-        *emotion_plots
+        *emotion_plots,
+        face_sample
     )
 
-# Gradio interface
-
 
+# Gradio interface
 iface = gr.Interface(
     fn=process_video,
     inputs=[