Nathan Habib committed · Commit edfa118 · 1 parent: 9844b3e

fixing unshowed models with using search bar
app.py CHANGED
@@ -100,11 +100,6 @@ models = original_df["model_name_for_query"].tolist() # needed for model backlin
 
 to_be_dumped = f"models = {repr(models)}\n"
 
-# with open("models_backlinks.py", "w") as f:
-#     f.write(to_be_dumped)
-
-# print(to_be_dumped)
-
 leaderboard_df = original_df.copy()
 (
     finished_eval_queue_df,
@@ -112,8 +107,6 @@ leaderboard_df = original_df.copy()
     pending_eval_queue_df,
 ) = get_evaluation_queue_df(eval_queue, eval_queue_private, EVAL_REQUESTS_PATH, EVAL_COLS)
 
-print(leaderboard_df["Precision"].unique())
-
 
 ## INTERACTION FUNCTIONS
 def add_new_eval(
@@ -225,7 +218,6 @@ def update_table(hidden_df: pd.DataFrame, current_columns_df: pd.DataFrame, colu
     return df
 
 def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
-    print(query)
     return df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]
 
 def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
@@ -259,9 +251,8 @@ def filter_models(
     filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
 
     type_emoji = [t[0] for t in type_query]
-
-    filtered_df = filtered_df[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
-    filtered_df = filtered_df[df[AutoEvalColumn.precision.name].isin(precision_query)]
+    filtered_df = filtered_df[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji + ["?"])]
+    filtered_df = filtered_df[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
 
     numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
     params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
@@ -327,14 +318,12 @@ with demo:
             ModelType.FT.to_str(),
            ModelType.IFT.to_str(),
            ModelType.RL.to_str(),
-           ModelType.Unknown.to_str(),
        ],
        value=[
            ModelType.PT.to_str(),
            ModelType.FT.to_str(),
            ModelType.IFT.to_str(),
            ModelType.RL.to_str(),
-           ModelType.Unknown.to_str(),
        ],
        interactive=True,
        elem_id="filter-columns-type",