Commit d0b5dce by tpierrot
Parent(s): 1f6c2d7

moving from default sets to lists
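Context for the change: Python sets have no defined iteration order, so any UI element whose choices are built from a set (here, the metric and aggregation dropdowns and the task checkboxes) can render its options in a different order on every run; lists preserve insertion order. A minimal sketch of the difference, reusing the _METRICS values from app.py:

    # Iteration order of a set is arbitrary, and for strings it can even
    # change between interpreter runs because of hash randomization.
    metrics_set = {"MCC", "F1", "ACC"}
    print(list(metrics_set))   # e.g. ['F1', 'ACC', 'MCC'] -- order not guaranteed

    # A list keeps the order it was written in, so the dropdown is stable.
    metrics_list = ["MCC", "F1", "ACC"]
    print(metrics_list)        # always ['MCC', 'F1', 'ACC']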

Files changed (1): app.py (+52 −34)
app.py CHANGED
@@ -1,55 +1,56 @@
-import functools
+from typing import List
 
 import gradio as gr
-import pandas as pd
 import numpy as np
-from typing import List
+import pandas as pd
 
-_ORIGINAL_DF = pd.read_csv('./data/benchmark.csv')
-_METRICS = {'MCC', 'F1', 'ACC'}
-_AGGREGATION_METHODS = {'mean', 'max', 'min', 'median'}
-_DATASETS = set(_ORIGINAL_DF['Dataset'])
+_ORIGINAL_DF = pd.read_csv("./data/benchmark.csv")
+_METRICS = ["MCC", "F1", "ACC"]
+_AGGREGATION_METHODS = ["mean", "max", "min", "median"]
+_DATASETS = list(set(_ORIGINAL_DF["Dataset"]))
 
 _BIBTEX = """@article{DallaTorre2023TheNT,
-title={The Nucleotide Transformer: Building and Evaluating Robust Foundation Models for Human Genomics},
-author={Hugo Dalla-Torre and Liam Gonzalez and Javier Mendoza Revilla and Nicolas Lopez Carranza and Adam Henryk Grzywaczewski and Francesco Oteri and Christian Dallago and Evan Trop and Hassan Sirelkhatim and Guillaume Richard and Marcin J. Skwark and Karim Beguir and Marie Lopez and Thomas Pierrot},
+title={The Nucleotide Transformer: Building and Evaluating Robust Foundation Models for Human Genomics},  # noqa
+author={Hugo Dalla-Torre and Liam Gonzalez and Javier Mendoza Revilla and Nicolas Lopez Carranza and Adam Henryk Grzywaczewski and Francesco Oteri and Christian Dallago and Evan Trop and Hassan Sirelkhatim and Guillaume Richard and Marcin J. Skwark and Karim Beguir and Marie Lopez and Thomas Pierrot},  # noqa
 journal={bioRxiv},
 year={2023},
 url={https://api.semanticscholar.org/CorpusID:255943445}
 }
 """
-_LAST_UPDATED = 'Aug 28, 2023'
+_LAST_UPDATED = "Aug 28, 2023"
 
 banner_url = "./assets/logo.png"
-_BANNER = f'<div style="display: flex; justify-content: space-around;"><img src="{banner_url}" alt="Banner" style="width: 40vw; min-width: 300px; max-width: 600px;"> </div>'
+_BANNER = f'<div style="display: flex; justify-content: space-around;"><img src="{banner_url}" alt="Banner" style="width: 40vw; min-width: 300px; max-width: 600px;"> </div>'  # noqa
 
-_INTRODUCTION_TEXT = "The 🤗 Nucleotide Transformer Leaderboard aims to track, rank and evaluate DNA foundational models on a set of curated downstream tasks with a standardized evaluation protocole."
+_INTRODUCTION_TEXT = "The 🤗 Nucleotide Transformer Leaderboard aims to track, rank and evaluate DNA foundation models on a set of curated downstream tasks with a standardized evaluation protocol."  # noqa
 
 
 def retrieve_array_from_text(text):
-    return np.fromstring(text.replace('[', '').replace(']', ''), dtype=float, sep=',')
+    return np.fromstring(text.replace("[", "").replace("]", ""), dtype=float, sep=",")
 
 
 def format_number(x):
-    return float(f'{x:.3}')
+    return float(f"{x:.3}")
 
 
-def get_dataset(tasks: List[str], target_metric: str = 'MCC', aggregation_method: str = 'mean'):
+def get_dataset(
+    tasks: List[str], target_metric: str = "MCC", aggregation_method: str = "mean"
+):
 
     aggr_fn = getattr(np, aggregation_method)
     scores = _ORIGINAL_DF[target_metric].apply(retrieve_array_from_text).apply(aggr_fn)
     scores = scores.apply(format_number)
-    df = _ORIGINAL_DF.drop(columns=list(_METRICS))
-    df['Score'] = scores
-    df = df.pivot(index='Model', columns='Dataset', values='Score')
+    df = _ORIGINAL_DF.drop(columns=_METRICS)
+    df["Score"] = scores
+    df = df.pivot(index="Model", columns="Dataset", values="Score")
     df = df[tasks]
-    df['All Tasks'] = df.agg('mean', axis='columns').apply(format_number)
+    df["All Tasks"] = df.agg("mean", axis="columns").apply(format_number)
     columns = list(df.columns.values)
     columns.sort()
     df = df[columns]
     df.reset_index(inplace=True)
-    df = df.rename(columns={'index': 'Model'})
-    df = df.sort_values(by=['All Tasks'], ascending=False)
+    df = df.rename(columns={"index": "Model"})
+    df = df.sort_values(by=["All Tasks"], ascending=False)
 
     leaderboard_table = gr.components.Dataframe.update(
         value=df,
@@ -68,29 +69,29 @@ with gr.Blocks() as demo:
 
     with gr.Row():
         metric_choice = gr.Dropdown(
-            choices=list(_METRICS),
+            choices=_METRICS,
             value="MCC",
             label="Metric displayed.",
         )
        aggr_choice = gr.Dropdown(
-            choices=list(_AGGREGATION_METHODS),
+            choices=_AGGREGATION_METHODS,
            value="mean",
             label="Aggregation used over 10-folds.",
         )
 
     with gr.Row():
         selected_tasks = gr.CheckboxGroup(
-            choices=list(_DATASETS),
-            value=list(_DATASETS),
-            label="Tasks",
-            info="Downstream tasks."
+            choices=_DATASETS, value=_DATASETS, label="Tasks", info="Downstream tasks."
         )
+
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🏅 Leaderboard", elem_id="od-benchmark-tab-table", id=0):
-            dataframe = gr.components.Dataframe(elem_id="leaderboard-table",)
+            dataframe = gr.components.Dataframe(
+                elem_id="leaderboard-table",
+            )
 
         with gr.TabItem("📈 Metrics", elem_id="od-benchmark-tab-table", id=1):
-            gr.Markdown('Hey hey hey', elem_classes="markdown-text")
+            gr.Markdown("Hey hey hey", elem_classes="markdown-text")
 
         # with gr.TabItem("✉️✨ Request a model here!", elem_id="od-benchmark-tab-table",
         #                 id=2):
@@ -114,14 +115,31 @@ with gr.Blocks() as demo:
     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):
             gr.Textbox(
-                value=_BIBTEX, lines=7,
+                value=_BIBTEX,
+                lines=7,
                 label="Copy the BibTeX snippet to cite this source",
                 elem_id="citation-button",
             ).style(show_copy_button=True)
 
-    selected_tasks.change(get_dataset, inputs=[selected_tasks, metric_choice, aggr_choice], outputs=dataframe)
-    metric_choice.change(get_dataset, inputs=[selected_tasks, metric_choice, aggr_choice], outputs=dataframe)
-    aggr_choice.change(get_dataset, inputs=[selected_tasks, metric_choice, aggr_choice], outputs=dataframe)
-    demo.load(fn=get_dataset, inputs=[selected_tasks, metric_choice, aggr_choice], outputs=dataframe)
+    selected_tasks.change(
+        get_dataset,
+        inputs=[selected_tasks, metric_choice, aggr_choice],
+        outputs=dataframe,
+    )
+    metric_choice.change(
+        get_dataset,
+        inputs=[selected_tasks, metric_choice, aggr_choice],
+        outputs=dataframe,
+    )
+    aggr_choice.change(
+        get_dataset,
+        inputs=[selected_tasks, metric_choice, aggr_choice],
+        outputs=dataframe,
+    )
+    demo.load(
+        fn=get_dataset,
+        inputs=[selected_tasks, metric_choice, aggr_choice],
+        outputs=dataframe,
+    )
 
 demo.launch()
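Note that the new _DATASETS = list(set(...)) still builds the list from a set, so the task checkbox order remains unspecified; only the hard-coded dropdown constants gain a fixed order. A deterministic alternative, sketched here as a suggestion rather than part of this commit, is to deduplicate with pandas, which preserves first-appearance order, or to sort explicitly:

    import pandas as pd

    df = pd.read_csv("./data/benchmark.csv")

    # Series.unique() deduplicates while keeping first-appearance order,
    # unlike list(set(...)), whose order depends on hashing.
    datasets = list(df["Dataset"].unique())

    # Or fix an alphabetical order outright:
    datasets_sorted = sorted(df["Dataset"].unique())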