devjas1 committed
Commit · edc1cf7
1 Parent(s): 846d13f
(feat)[Performance Tracking]: Update performance metrics logging and cleanup; add tracking database to .gitignore
Browse files
- .gitignore +1 -0
- core_logic.py +14 -9
- outputs/performance_tracking.db +0 -0
.gitignore CHANGED

@@ -26,3 +26,4 @@ datasets/**
 # ---------------------------------------
 
 __pycache__.py
+outputs/performance_tracking.db
core_logic.py CHANGED
@@ -10,6 +10,7 @@ import numpy as np
 import streamlit as st
 from pathlib import Path
 from config import SAMPLE_DATA_DIR
+from datetime import datetime
 
 
 def label_file(filename: str) -> int:
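The new import supports the ISO-8601 timestamp written into each metrics record in the hunk further below. A minimal sketch of the value it produces:

from datetime import datetime

# datetime.now().isoformat() yields an ISO-8601 string such as
# "2025-01-15T10:30:00.123456", which is the format the timestamp
# field in the metrics hunk below stores.
print(datetime.now().isoformat())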
@@ -108,7 +109,7 @@ def run_inference(y_resampled, model_choice, _cache_key=None):
     start_time = time.time()
     start_memory = _get_memory_usage()
 
-    model.eval()
+    model.eval()  # type: ignore
     with torch.no_grad():
         if model is None:
             raise ValueError(
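For context, the eval-plus-no_grad pattern this hunk touches is the standard way to time pure inference in PyTorch. A minimal, self-contained sketch of that pattern (the model and input here are stand-ins, not the repo's actual pipeline):

import time
import torch

def timed_inference(model: torch.nn.Module, x: torch.Tensor):
    # Sketch of the pattern in run_inference: check the model, switch to
    # eval mode (disables dropout and batch-norm updates), and time the
    # forward pass with autograd bookkeeping disabled.
    if model is None:
        raise ValueError("No model loaded")
    model.eval()
    start = time.time()
    with torch.no_grad():
        logits = model(x)
    return logits, time.time() - start

logits, elapsed = timed_inference(torch.nn.Linear(4, 2), torch.randn(1, 4))
print(f"inference took {elapsed:.6f}s")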
@@ -131,32 +132,36 @@ def run_inference(y_resampled, model_choice, _cache_key=None):
         metrics = PerformanceMetrics(
             model_name=model_choice,
             prediction_time=inference_time,
-            preprocessing_time=0.0,
+            preprocessing_time=0.0,  # Will be updated by calling function if available
             total_time=inference_time,
             memory_usage_mb=memory_usage,
             accuracy=None,  # Will be updated if ground truth is available
             confidence=confidence,
-            timestamp=datetime.
-            input_size=
+            timestamp=datetime.now().isoformat(),
+            input_size=(
+                len(y_resampled) if hasattr(y_resampled, "__len__") else TARGET_LEN
+            ),
             modality=modality,
         )
-
-
-
+
+        tracker.log_performance(metrics)
+    except (AttributeError, ValueError, KeyError) as e:
+        # Don't fail inference if performance tracking fails
         print(f"Performance tracking failed: {e}")
 
     cleanup_memory()
     return prediction, logits_list, probs, inference_time, logits
 
+
 def _get_memory_usage() -> float:
     """Get current memory usage in MB"""
     try:
         import psutil
+
         process = psutil.Process()
-        return process.memory_info().rss / 1024 / 1024
+        return process.memory_info().rss / 1024 / 1024  # Convert to MB
     except ImportError:
         return 0.0  # psutil not available
-
 
 
 @st.cache_data
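The PerformanceMetrics container and the tracker used above are defined elsewhere in the repository and are not part of this diff. A hypothetical reconstruction of the container's shape, inferred purely from the fields passed at this call site:

from dataclasses import dataclass
from typing import Optional

@dataclass
class PerformanceMetrics:
    # Hypothetical: field names and types inferred from the call site in
    # run_inference; the real definition is not shown in this commit.
    model_name: str
    prediction_time: float
    preprocessing_time: float
    total_time: float
    memory_usage_mb: float
    accuracy: Optional[float]
    confidence: float
    timestamp: str
    input_size: int
    modality: str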
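The _get_memory_usage helper reports resident set size in MB (rss is in bytes, hence the two divisions by 1024). A small demonstration of how such a delta-based measurement behaves, assuming psutil is installed:

import psutil

def get_memory_usage_mb() -> float:
    # Same computation as _get_memory_usage: resident set size, bytes -> MB.
    return psutil.Process().memory_info().rss / 1024 / 1024

before = get_memory_usage_mb()
buf = bytearray(50 * 1024 * 1024)  # allocate ~50 MB so the delta is visible
after = get_memory_usage_mb()
print(f"memory delta: {after - before:.1f} MB")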
outputs/performance_tracking.db DELETED
File without changes
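The binary database is deleted here presumably because the tracker recreates it at runtime, which is also why the same path is now gitignored. The tracker's implementation is not shown in this commit; a hypothetical sketch of what a SQLite-backed log_performance could look like, with all names and the schema assumed rather than taken from the repo:

import sqlite3
from dataclasses import asdict
from pathlib import Path

DB_PATH = Path("outputs/performance_tracking.db")  # the path ignored above

def log_performance(metrics) -> None:
    # Hypothetical backend: append one metrics record per inference run.
    DB_PATH.parent.mkdir(parents=True, exist_ok=True)
    row = asdict(metrics)
    with sqlite3.connect(DB_PATH) as conn:
        conn.execute(
            "CREATE TABLE IF NOT EXISTS performance ("
            "model_name TEXT, prediction_time REAL, total_time REAL, "
            "memory_usage_mb REAL, confidence REAL, timestamp TEXT)"
        )
        conn.execute(
            "INSERT INTO performance VALUES (?, ?, ?, ?, ?, ?)",
            (
                row["model_name"],
                row["prediction_time"],
                row["total_time"],
                row["memory_usage_mb"],
                row["confidence"],
                row["timestamp"],
            ),
        )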