devjas1 committed
Commit 8c75a82 · Parent: 2547be1

fix(results): Improve formatting and consistency in results management functions

Files changed (1)
  1. utils/results_manager.py +52 -24
utils/results_manager.py CHANGED
@@ -31,7 +31,7 @@ class ResultsManager:
         logits: List[float],
         ground_truth: Optional[int] = None,
         processing_time: float = 0.0,
-        metadata: Optional[Dict[str, Any]] = None
+        metadata: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Add a single inference result to the results table"""
         ResultsManager.init_results_table()
@@ -46,7 +46,7 @@ class ResultsManager:
             "logits": logits,
             "ground_truth": ground_truth,
             "processing_time": processing_time,
-            "metadata": metadata or {}
+            "metadata": metadata or {},
         }
 
         st.session_state[ResultsManager.RESULTS_KEY].append(result)
@@ -84,9 +84,17 @@ class ResultsManager:
                 "Prediction": result["prediction"],
                 "Predicted Class": result["predicted_class"],
                 "Confidence": f"{result['confidence']:.3f}",
-                "Stable Logit": f"{result['logits'][0]:.3f}" if len(result['logits']) > 0 else "N/A",
-                "Weathered Logit": f"{result['logits'][1]:.3f}" if len(result['logits']) > 1 else "N/A",
-                "Ground Truth": result["ground_truth"] if result["ground_truth"] is not None else "Unknown",
+                "Stable Logit": (
+                    f"{result['logits'][0]:.3f}" if len(result["logits"]) > 0 else "N/A"
+                ),
+                "Weathered Logit": (
+                    f"{result['logits'][1]:.3f}" if len(result["logits"]) > 1 else "N/A"
+                ),
+                "Ground Truth": (
+                    result["ground_truth"]
+                    if result["ground_truth"] is not None
+                    else "Unknown"
+                ),
                 "Processing Time (s)": f"{result['processing_time']:.3f}",
             }
             df_data.append(row)
@@ -103,7 +111,7 @@ class ResultsManager:
         # ===Use StringIO to create CSV in memory===
         csv_buffer = io.StringIO()
         df.to_csv(csv_buffer, index=False)
-        return csv_buffer.getvalue().encode('utf-8')
+        return csv_buffer.getvalue().encode("utf-8")
 
     @staticmethod
     def export_to_json() -> str:
@@ -126,12 +134,16 @@ class ResultsManager:
             "stable_predictions": sum(1 for r in results if r["prediction"] == 0),
             "weathered_predictions": sum(1 for r in results if r["prediction"] == 1),
             "avg_confidence": sum(r["confidence"] for r in results) / len(results),
-            "avg_processing_time": sum(r["processing_time"] for r in results) / len(results),
-            "files_with_ground_truth": sum(1 for r in results if r["ground_truth"] is not None),
+            "avg_processing_time": sum(r["processing_time"] for r in results)
+            / len(results),
+            "files_with_ground_truth": sum(
+                1 for r in results if r["ground_truth"] is not None
+            ),
         }
         # ===Calculate accuracy if ground truth is available===
         correct_predictions = sum(
-            1 for r in results
+            1
+            for r in results
             if r["ground_truth"] is not None and r["prediction"] == r["ground_truth"]
         )
         total_with_gt = stats["files_with_ground_truth"]
@@ -164,11 +176,13 @@ class ResultsManager:
             "status_type": "info",
             "input_text": None,
             "filename": None,
-            "input_source": None, # "upload", "batch" or "sample"
+            "input_source": None,  # "upload", "batch" or "sample"
             "sample_select": "-- Select Sample --",
-            "input_mode": "Upload File", # controls which pane is visible
+            "input_mode": "Upload File",  # controls which pane is visible
             "inference_run_once": False,
-            "x_raw": None, "y_raw": None, "y_resampled": None,
+            "x_raw": None,
+            "y_raw": None,
+            "y_resampled": None,
             "log_messages": [],
             "uploader_version": 0,
             "current_upload_key": "upload_txt_0",
@@ -184,6 +198,9 @@ class ResultsManager:
     @staticmethod
     def reset_ephemeral_state():
         """Comprehensive reset for the entire app state."""
+
+        current_version = st.session_state.get("uploader_version", 0)
+
         # Define keys that should NOT be cleared by a full reset
         keep_keys = {"model_select", "input_mode"}
 
@@ -191,12 +208,17 @@ class ResultsManager:
             if k not in keep_keys:
                 st.session_state.pop(k, None)
 
-        # Re-initialize the core state after clearing
-        ResultsManager.init_session_state()
+        st.session_state["status_message"] = "Ready to analyze polymer spectra"
+        st.session_state["status_type"] = "info"
+        st.session_state["batch_files"] = []
+        st.session_state["inference_run_once"] = True
+        st.session_state[""] = ""
 
-        # CRITICAL: Bump the uploader version to force a widget reset
-        st.session_state["uploader_version"] += 1
-        st.session_state["current_upload_key"] = f"upload_txt_{st.session_state['uploader_version']}"
+        # CRITICAL: Increment the preserved version and re-assign it
+        st.session_state["uploader_version"] = current_version + 1
+        st.session_state["current_upload_key"] = (
+            f"upload_txt_{st.session_state['uploader_version']}"
+        )
 
     @staticmethod
     def display_results_table() -> None:
@@ -205,7 +227,8 @@ class ResultsManager:
 
         if df.empty:
             st.info(
-                "No inference results yet. Upload files and run analysis to see results here.")
+                "No inference results yet. Upload files and run analysis to see results here."
+            )
             return
 
         st.subheader(f"Inference Results ({len(df)} files)")
@@ -220,7 +243,9 @@ class ResultsManager:
             st.metric("Avg Confidence", f"{stats['avg_confidence']:.3f}")
         with col3:
             st.metric(
-                "Stable/Weathered", f"{stats['stable_predictions']}/{stats['weathered_predictions']}")
+                "Stable/Weathered",
+                f"{stats['stable_predictions']}/{stats['weathered_predictions']}",
+            )
         with col4:
             if stats["accuracy"] is not None:
                 st.metric("Accuracy", f"{stats['accuracy']:.3f}")
@@ -231,7 +256,7 @@ class ResultsManager:
         st.dataframe(df, use_container_width=True)
 
         # ==Export Button==
-        col1, col2, col3 = st.columns([1, 1, 2])
+        col1, col2, col3 = st.columns([1, 1, 1])
 
         with col1:
             csv_data = ResultsManager.export_to_csv()
@@ -240,7 +265,7 @@ class ResultsManager:
                 label="Download CSV",
                 data=csv_data,
                 file_name=f"polymer_results_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv",
-                mime="text/csv"
+                mime="text/csv",
            )
 
         with col2:
@@ -250,9 +275,12 @@ class ResultsManager:
                 label="📥 Download JSON",
                 data=json_data,
                 file_name=f"polymer_results_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
-                mime="application/json"
+                mime="application/json",
             )
 
         with col3:
-            if st.button("Clear All Results", help="Clear all stored results", on_click=ResultsManager.reset_ephemeral_state):
-                st.rerun()
+            st.button(
+                "Clear All Results",
+                help="Clear all stored results",
+                on_click=ResultsManager.reset_ephemeral_state,
+            )
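
For context, the sketch below shows one way the static helpers touched by this commit could be wired into a Streamlit page. It is a minimal illustration rather than code from the repository: the import path follows the file location in the diff, and the sidebar reset button is an assumed entry point.

# Hypothetical usage sketch (not part of this commit).
import streamlit as st

from utils.results_manager import ResultsManager

# Seed session-state defaults and the results table before rendering anything.
ResultsManager.init_session_state()
ResultsManager.init_results_table()

# Renders the results dataframe, the summary metrics row, and the
# CSV/JSON download buttons as display_results_table() defines them.
ResultsManager.display_results_table()

# A full reset can also be offered elsewhere in the app (assumed placement);
# it preserves "model_select"/"input_mode" and bumps the uploader widget key.
if st.sidebar.button("Reset app state"):
    ResultsManager.reset_ephemeral_state()
    st.rerun()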