hdallatorre committed
Commit 33b4a3f
1 Parent(s): a13c03f

Update app.py

Files changed (1): app.py (+2 -2)
app.py CHANGED

@@ -47,7 +47,7 @@ _LAST_UPDATED = "Aug 28, 2023"
 banner_url = "./assets/logo.png"
 _BANNER = f'<div style="display: flex; justify-content: space-around;"><img src="{banner_url}" alt="Banner" style="width: 40vw; min-width: 300px; max-width: 600px;"> </div>' # noqa

-_INTRODUCTION_TEXT = "The 🤗 Nucleotide Transformer Leaderboard aims to track, rank and evaluate DNA foundation models on a set of curated downstream tasks introduced in the Hugging Face dataset [nucleotide_transformer_downstream_tasks](https://huggingface.co/datasets/InstaDeepAI/nucleotide_transformer_downstream_tasks), with a standardized evaluation protocol presented in the "Methods" tab." # noqa
+_INTRODUCTION_TEXT = """The 🤗 Nucleotide Transformer Leaderboard aims to track, rank and evaluate DNA foundation models on a set of curated downstream tasks introduced in the Hugging Face dataset [nucleotide_transformer_downstream_tasks](https://huggingface.co/datasets/InstaDeepAI/nucleotide_transformer_downstream_tasks), with a standardized evaluation protocol presented in the "Methods" tab.""" # noqa


 def retrieve_array_from_text(text):
@@ -173,7 +173,7 @@ with gr.Blocks() as demo:
     )

     with gr.TabItem("ℹ️ Methods", elem_id="od-benchmark-tab-table", id=1):
-        gr.Markdown("We have compared the fine-tuned performance of Nucleotide Transformer models on the 18 downstream tasks with four different pre-trained models: [DNABERT-1](https://academic.oup.com/bioinformatics/article/37/15/2112/6128680), [DNABERT-2](https://arxiv.org/abs/2306.15006), [HyenaDNA](https://arxiv.org/abs/2306.15794) (1kb and 32kb context length) and the [Enformer](https://www.nature.com/articles/s41592-021-01252-x) (which was trained as a supervised model on several genomics tasks). We ported the architecture and trained weights of each model to our code framework and performed parameter-efficient fine-tuning for every model as described above, using the same cross-validation scheme for a fair comparison. All results can be visualized in an interactive leaderboard. Only for HyenaDNA did we perform full fine-tuning, due to the incompatibility of our parameter-efficient fine-tuning approach with the model architecture.", elem_classes="markdown-text")
+        gr.Markdown("""We have compared the fine-tuned performance of Nucleotide Transformer models on the 18 downstream tasks with four different pre-trained models: [DNABERT-1](https://academic.oup.com/bioinformatics/article/37/15/2112/6128680), [DNABERT-2](https://arxiv.org/abs/2306.15006), [HyenaDNA](https://arxiv.org/abs/2306.15794) (1kb and 32kb context length) and the [Enformer](https://www.nature.com/articles/s41592-021-01252-x) (which was trained as a supervised model on several genomics tasks). We ported the architecture and trained weights of each model to our code framework and performed parameter-efficient fine-tuning for every model as described above, using the same cross-validation scheme for a fair comparison. All results can be visualized in an interactive leaderboard. Only for HyenaDNA did we perform full fine-tuning, due to the incompatibility of our parameter-efficient fine-tuning approach with the model architecture.""", elem_classes="markdown-text")

     gr.Markdown(f"Last updated on **{_LAST_UPDATED}**", elem_classes="markdown-text")
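The substance of the commit is a quoting fix: both strings contain a literal "Methods" in double quotes, which terminates the original double-quoted literals early and makes the file fail to parse. A minimal standalone sketch of the failure mode and the triple-quote fix (variable names are illustrative, not from app.py):

```python
# A double-quoted literal ends at the first unescaped inner quote;
# the parser then sees a bare name and raises SyntaxError:
#
#   text = "presented in the "Methods" tab."   # SyntaxError
#
# Escaping works, but triple quotes avoid the backslashes entirely:
escaped = "presented in the \"Methods\" tab."
tripled = """presented in the "Methods" tab."""

assert escaped == tripled
print(tripled)  # -> presented in the "Methods" tab.
```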
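For context on where the two strings land: the hunk headers show they feed gr.Markdown components inside a gr.Blocks layout. A minimal runnable sketch of that structure, assuming a gr.Tabs container around the TabItem (not visible in the diff) and abbreviated text:

```python
import gradio as gr

_LAST_UPDATED = "Aug 28, 2023"
# Triple quotes let the intro text keep its literal "Methods" quotes.
_INTRODUCTION_TEXT = """Tracks and ranks DNA models; protocol in the "Methods" tab."""

with gr.Blocks() as demo:
    gr.Markdown(_INTRODUCTION_TEXT, elem_classes="markdown-text")
    with gr.Tabs():  # assumed container: a TabItem must be nested inside Tabs
        with gr.TabItem("ℹ️ Methods", elem_id="od-benchmark-tab-table", id=1):
            gr.Markdown("""Methods text, free to contain "quoted" phrases.""",
                        elem_classes="markdown-text")
    gr.Markdown(f"Last updated on **{_LAST_UPDATED}**", elem_classes="markdown-text")

if __name__ == "__main__":
    demo.launch()
```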
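The Methods text also mentions parameter-efficient fine-tuning under a shared cross-validation scheme. As one hedged illustration of what such a setup can look like, here is a LoRA-based sketch using the peft library; LoRA is a stand-in technique here, and neither the PEFT method nor the checkpoint choice is specified by this diff:

```python
from transformers import AutoModelForSequenceClassification
from peft import LoraConfig, get_peft_model

# Checkpoint name is one of the public Nucleotide Transformer releases;
# swap in whichever model is actually being benchmarked.
model = AutoModelForSequenceClassification.from_pretrained(
    "InstaDeepAI/nucleotide-transformer-500m-human-ref", num_labels=2
)

# LoRA stands in for the leaderboard's PEFT method; "query"/"value"
# match the ESM-style attention module names of this checkpoint.
peft_config = LoraConfig(
    task_type="SEQ_CLS", r=8, lora_alpha=16,
    target_modules=["query", "value"], lora_dropout=0.1,
)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()  # only the adapter weights train
```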