mgyigit committed · Commit b04751e · verified · 1 Parent(s): da1a0ec

Update src/vis_utils.py

Files changed (1):
  1. src/vis_utils.py (+4 -4)
src/vis_utils.py CHANGED
@@ -301,7 +301,7 @@ def plot_affinity_results(method_names, metric, affinity_path="/tmp/affinity_res
     return filename
 
 def update_metric_choices(benchmark_type):
-    if benchmark_type == 'similarity':
+    if benchmark_type == 'Semantic Similarity Inference':
         # Show x and y metric selectors for similarity
         metric_names = benchmark_specific_metrics.get(benchmark_type, [])
         return (
@@ -309,7 +309,7 @@ def update_metric_choices(benchmark_type):
             gr.update(choices=metric_names, value=metric_names[1], visible=True),
             gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
         )
-    elif benchmark_type == 'function':
+    elif benchmark_type == 'Ontology-based Function Prediction':
         # Show aspect and dataset type selectors for function
         aspect_types = benchmark_specific_metrics[benchmark_type]['aspect_types']
         metric_types = benchmark_specific_metrics[benchmark_type]['dataset_types']
@@ -319,7 +319,7 @@ def update_metric_choices(benchmark_type):
             gr.update(visible=False),
             gr.update(choices=metric_types, value=metric_types[0], visible=True)
         )
-    elif benchmark_type == 'family':
+    elif benchmark_type == 'Drug Target Protein Family Classification':
         # Show dataset and metric selectors for family
         datasets = benchmark_specific_metrics[benchmark_type]['datasets']
         metrics = benchmark_specific_metrics[benchmark_type]['metrics']
@@ -328,7 +328,7 @@ def update_metric_choices(benchmark_type):
             gr.update(choices=datasets, value=datasets[0], visible=True),
             gr.update(visible=False)
         )
-    elif benchmark_type == 'affinity':
+    elif benchmark_type == 'Protein Protein Binding Affinity Estimation':
         # Show single metric selector for affinity
         metrics = benchmark_specific_metrics[benchmark_type]
         return (
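
Since this change makes update_metric_choices match on full display labels rather than short keys, the benchmark-type selector (and the keys of benchmark_specific_metrics) must supply exactly the same strings. Below is a minimal sketch of how such a handler is typically wired to a Gradio dropdown; the component names and the five-output layout are assumptions inferred from the gr.update(...) tuples visible in the diff, not code from this repository.

# Hypothetical wiring sketch, not part of this commit.
import gradio as gr

# Assumes the repo is importable as a package from its root.
from src.vis_utils import update_metric_choices

with gr.Blocks() as demo:
    # The new human-readable labels introduced by this commit become the choices.
    benchmark_type = gr.Dropdown(
        choices=[
            "Semantic Similarity Inference",
            "Ontology-based Function Prediction",
            "Drug Target Protein Family Classification",
            "Protein Protein Binding Affinity Estimation",
        ],
        label="Benchmark type",
    )
    # Selectors whose choices/visibility the handler toggles (names are assumed).
    x_metric = gr.Dropdown(label="X metric", visible=False)
    y_metric = gr.Dropdown(label="Y metric", visible=False)
    aspect = gr.Dropdown(label="Aspect", visible=False)
    dataset = gr.Dropdown(label="Dataset", visible=False)
    single_metric = gr.Dropdown(label="Metric", visible=False)

    # On each change, the returned tuple of gr.update() values is applied
    # positionally to the components listed in `outputs`.
    benchmark_type.change(
        update_metric_choices,
        inputs=benchmark_type,
        outputs=[x_metric, y_metric, aspect, dataset, single_metric],
    )

demo.launch()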