added metric selection functionality in comparision tab
app.py CHANGED
@@ -150,12 +150,20 @@ if __name__ == "__main__":
     ### EVALUATION METRICS COMPARISION ###
 
     st.subheader("Evaluation Metrics Comparision")  # , divider='rainbow')
-    metric_names = "\n".join(
-        ["- " + evaluation_metric.name for evaluation_metric in EVALUATION_METRICS]
-    )
+    # metric_names = "\n".join(
+    #     ["- " + evaluation_metric.name for evaluation_metric in EVALUATION_METRICS]
+    # )
     st.markdown(
-        "The different evaluation metrics we have for the NER task are
-        f"{metric_names}"
+        "The different evaluation metrics we have for the NER task are shown below, select the metrics to compare.\n"
+        # f"{metric_names}"
+    )
+
+    metrics_selection = [
+        (st.checkbox(evaluation_metric.name, value=True), evaluation_metric.name)
+        for evaluation_metric in EVALUATION_METRICS
+    ]
+    metrics_to_show = list(
+        map(lambda x: x[1], filter(lambda x: x[0], metrics_selection))
     )
 
     with st.expander("View Predictions Details"):

@@ -175,4 +183,7 @@ if __name__ == "__main__":
 
     metrics_df = predictions_df.drop(["ner_spans"], axis=1)
 
-    st.write(
+    st.write(
+        metrics_df[["Prediction"] + metrics_to_show].to_html(escape=False),
+        unsafe_allow_html=True,
+    )