tpierrot committed on
Commit
198549f
1 Parent(s): 60c9ec1

Upload multi_species_genomes.py

Browse files
Files changed (1) hide show
  1. multi_species_genomes.py +179 -0
multi_species_genomes.py ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script
2
+ # contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Script for the multi-species genomes dataset. This dataset contains the genomes
16
+ from 850 different species."""
17
+
18
+ from typing import List
19
+ import datasets
20
+ import pandas as pd
21
+ from Bio import SeqIO
22
+
23
+
24
# Find for instance the citation on arxiv or on the dataset repo/website
# BibTeX entry for the NCBI RefSeq paper — the source of these genomes.
_CITATION = """\
@article{o2016reference,
title={Reference sequence (RefSeq) database at NCBI: current status, taxonomic expansion, and functional annotation},
author={O'Leary, Nuala A and Wright, Mathew W and Brister, J Rodney and Ciufo, Stacy and Haddad, Diana and McVeigh, Rich and Rajput, Bhanu and Robbertse, Barbara and Smith-White, Brian and Ako-Adjei, Danso and others},
journal={Nucleic acids research},
volume={44},
number={D1},
pages={D733--D745},
year={2016},
publisher={Oxford University Press}
}
"""

# You can copy an official description
# Short human-readable summary shown on the dataset hub page.
_DESCRIPTION = """\
Genomes from 850 different species.
"""

# Homepage of the data provider (NCBI).
_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/"

# NCBI data-usage policy URL; stands in for an explicit license.
_LICENSE = "https://www.ncbi.nlm.nih.gov/home/about/policies/"
46
+
47
# Load the genome download URLs (urls.csv is expected next to this script,
# resolved against the current working directory) and carve out the splits.
url_df = pd.read_csv('urls.csv')
urls = url_df['URL'].tolist()
_TEST_URLS = urls[-50:]            # last 50 genomes -> test set
_VALIDATION_URLS = urls[-100:-50]  # previous 50 genomes -> validation set
_TRAIN_URLS = urls[:-100]          # remaining 800 genomes -> training set

# Supported chunk sizes (bp) and the overlap carried between adjacent chunks.
_CHUNK_LENGTHS = [6000, 12000]
_OVERLAP = 100
55
+
56
+
57
def filter_fn(char: str) -> str:
    """Return *char* unchanged if it is a canonical base, otherwise 'N'.

    Any letter that is not one of the four nucleotides A, T, C, G is
    mapped to the ambiguity code 'N'.
    """
    return char if char in {'A', 'T', 'C', 'G'} else 'N'
65
+
66
+
67
def clean_sequence(seq: str) -> str:
    """Upper-case a DNA chunk and restrict its alphabet to A, T, C, G, N.

    After upper-casing, every character that is not a canonical base is
    replaced by the ambiguity code 'N'.
    """
    bases = {'A', 'T', 'C', 'G'}
    return ''.join(c if c in bases else 'N' for c in seq.upper())
76
+
77
+
78
class MultiSpeciesGenomesConfig(datasets.BuilderConfig):
    """BuilderConfig for the multi-species genomes dataset."""

    def __init__(self, *args, chunk_length: int, **kwargs):
        """Build a config whose name encodes the chunk length in kbp.

        Args:
            chunk_length (:obj:`int`): Chunk length.
            **kwargs: keyword arguments forwarded to super.
        """
        kbp = int(chunk_length / 1000)
        super().__init__(*args, name=f'{kbp}kbp', **kwargs)
        self.chunk_length = chunk_length
94
+
95
+
96
class MultiSpeciesGenomes(datasets.GeneratorBasedBuilder):
    """Genomes from 850 species, filtered and split into chunks of consecutive
    nucleotides. 50 genomes are taken for test, 50 for validation and 800
    for training."""

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = MultiSpeciesGenomesConfig
    BUILDER_CONFIGS = [
        MultiSpeciesGenomesConfig(chunk_length=chunk_length)
        for chunk_length in _CHUNK_LENGTHS
    ]
    DEFAULT_CONFIG_NAME = "6kbp"

    def _info(self):
        """Return the dataset metadata (features, description, license, citation)."""
        features = datasets.Features(
            {
                "sequence": datasets.Value("string"),
                "description": datasets.Value("string"),
                # BUG FIX: `datasets` has no "int" dtype — Value("int") raises
                # a ValueError when the script is loaded. Use "int32"
                # (positions within a single FASTA record fit comfortably).
                "start_pos": datasets.Value("int32"),
                "end_pos": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download/extract the genome files and declare the three splits."""
        train_downloaded_files = dl_manager.download_and_extract(_TRAIN_URLS)
        test_downloaded_files = dl_manager.download_and_extract(_TEST_URLS)
        validation_downloaded_files = dl_manager.download_and_extract(_VALIDATION_URLS)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": train_downloaded_files, "chunk_length": self.config.chunk_length},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"files": validation_downloaded_files, "chunk_length": self.config.chunk_length},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": test_downloaded_files, "chunk_length": self.config.chunk_length},
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, files, chunk_length):
        """Yield (key, example) pairs of overlapping DNA chunks.

        Each FASTA record is cleaned (upper-cased, non-ATCG letters mapped to
        'N'), truncated so it holds a whole number of chunks plus a
        2 * _OVERLAP tail, and cut into chunks of `chunk_length` nucleotides;
        each chunk carries up to 2 * _OVERLAP extra base pairs shared with its
        right-hand neighbour.
        """
        key = 0
        for file in files:
            with open(file, 'rt') as f:
                # SeqIO.parse is lazy, so records must be consumed while the
                # file handle is still open.
                fasta_sequences = SeqIO.parse(f, 'fasta')

                for record in fasta_sequences:

                    # parse descriptions in the fasta file
                    sequence, description = str(record.seq), record.description

                    # clean chromosome sequence
                    sequence = clean_sequence(sequence)
                    seq_length = len(sequence)

                    # number of full chunks once the overlap tail is reserved;
                    # records too short for a single chunk are skipped
                    num_chunks = (seq_length - 2 * _OVERLAP) // chunk_length

                    if num_chunks < 1:
                        continue

                    sequence = sequence[:(chunk_length * num_chunks + 2 * _OVERLAP)]
                    seq_length = len(sequence)

                    for i in range(num_chunks):
                        # chunk boundaries; min() guards the final chunk
                        start_pos = i * chunk_length
                        end_pos = min(seq_length, (i + 1) * chunk_length + 2 * _OVERLAP)
                        chunk_sequence = sequence[start_pos:end_pos]

                        # yield chunk
                        yield key, {
                            'sequence': chunk_sequence,
                            'description': description,
                            'start_pos': start_pos,
                            'end_pos': end_pos,
                        }
                        key += 1