etrop committed on
Commit a50fd2f
1 Parent(s): b7f0ad5

Create plant-multi-species-genomes.py

Files changed (1)
  1. plant-multi-species-genomes.py +161 -0
plant-multi-species-genomes.py ADDED
@@ -0,0 +1,161 @@
+
+ """Script for the plant multi-species genomes dataset. This dataset contains the genomes
+ from 48 different species."""
+
+ from typing import List
+ import datasets
+ import pandas as pd
+ from Bio import SeqIO
+
+
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """"""
+
+ # You can copy an official description
+ _DESCRIPTION = """\
+ Dataset made of diverse genomes available on NCBI and coming from 48 different species.
+ Test and validation are made of 2 species each. The rest of the genomes are used for training.
+ Default configuration "6kbp" yields chunks of 6.2kbp (100bp overlap on each side). The chunks of DNA are cleaned and processed so that
+ they can only contain the letters A, T, C, G and N.
+ """
+
+ _HOMEPAGE = "" #"https://www.ncbi.nlm.nih.gov/"
+
+ _LICENSE = "" #"https://www.ncbi.nlm.nih.gov/home/about/policies/"
+
+ _CHUNK_LENGTHS = [6000,]
+
+
+ def filter_fn(char: str) -> str:
+     """
+     Transforms any letter different from a base nucleotide into an 'N'.
+     """
+     if char in {'A', 'T', 'C', 'G'}:
+         return char
+     else:
+         return 'N'
+
+
+ def clean_sequence(seq: str) -> str:
+     """
+     Process a chunk of DNA to have all letters in upper case and restricted to
+     A, T, C, G and N.
+     """
+     seq = seq.upper()
+     seq = map(filter_fn, seq)
+     seq = ''.join(seq)
+     return seq
+
+
+ class PlantMultiSpeciesGenomesConfig(datasets.BuilderConfig):
+     """BuilderConfig for the Plant Multi Species Pre-training Dataset."""
+
+     def __init__(self, *args, chunk_length: int, overlap: int = 100, **kwargs):
+         """BuilderConfig for the multi species genomes.
+         Args:
+             chunk_length (:obj:`int`): Chunk length.
+             overlap (:obj:`int`): Overlap in base pairs between two consecutive chunks (defaults to 100).
+             **kwargs: keyword arguments forwarded to super.
+         """
+         num_kbp = chunk_length // 1000
+         super().__init__(
+             *args,
+             name=f'{num_kbp}kbp',
+             **kwargs,
+         )
+         self.chunk_length = chunk_length
+         self.overlap = overlap
+
+
+ class PlantMultiSpeciesGenomes(datasets.GeneratorBasedBuilder):
+     """Genomes from 48 species, filtered and split into chunks of consecutive
+     nucleotides. 2 genomes are taken for test, 2 for validation and 44
+     for training."""
+
+     VERSION = datasets.Version("1.1.0")
+     BUILDER_CONFIG_CLASS = PlantMultiSpeciesGenomesConfig
+     BUILDER_CONFIGS = [PlantMultiSpeciesGenomesConfig(chunk_length=chunk_length) for chunk_length in _CHUNK_LENGTHS]
+     DEFAULT_CONFIG_NAME = "6kbp"
+
+     def _info(self):
+
+         features = datasets.Features(
+             {
+                 "sequence": datasets.Value("string"),
+                 "description": datasets.Value("string"),
+                 "start_pos": datasets.Value("int32"),
+                 "end_pos": datasets.Value("int32"),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+
+         filepaths_txt = dl_manager.download_and_extract('plant_genome_file_names.txt')
+         with open(filepaths_txt) as f:
+             filepaths = [line.rstrip() for line in f]  # one genome file path per line
+
+         test_paths = filepaths[-2:]  # 2 genomes for test set
+         validation_paths = filepaths[-4:-2]  # 2 genomes for validation set
+         train_paths = filepaths[:-4]  # 44 genomes for training
+
+         train_downloaded_files = dl_manager.download_and_extract(train_paths)
+         test_downloaded_files = dl_manager.download_and_extract(test_paths)
+         validation_downloaded_files = dl_manager.download_and_extract(validation_paths)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_downloaded_files, "chunk_length": self.config.chunk_length}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"files": validation_downloaded_files, "chunk_length": self.config.chunk_length}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"files": test_downloaded_files, "chunk_length": self.config.chunk_length}),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, files, chunk_length):
+         key = 0
+         for file in files:
+             with open(file, 'rt') as f:
+                 fasta_sequences = SeqIO.parse(f, 'fasta')
+
+                 for record in fasta_sequences:
+
+                     # parse descriptions in the fasta file
+                     sequence, description = str(record.seq), record.description
+
+                     # clean chromosome sequence
+                     sequence = clean_sequence(sequence)
+                     seq_length = len(sequence)
+
+                     # number of full chunks of `chunk_length` bp, reserving `overlap` bp at each end
+                     num_chunks = (seq_length - 2 * self.config.overlap) // chunk_length
+
+                     if num_chunks < 1:
+                         continue
+
+                     sequence = sequence[:(chunk_length * num_chunks + 2 * self.config.overlap)]  # trim trailing bases that don't fill a full chunk
+                     seq_length = len(sequence)
+
+                     for i in range(num_chunks):
+                         # get chunk; each chunk spans chunk_length + 2 * overlap base pairs
+                         start_pos = i * chunk_length
+                         end_pos = min(seq_length, (i+1) * chunk_length + 2 * self.config.overlap)
+                         chunk_sequence = sequence[start_pos:end_pos]
+
+                         # yield chunk
+                         yield key, {
+                             'sequence': chunk_sequence,
+                             'description': description,
+                             'start_pos': start_pos,
+                             'end_pos': end_pos,
+                         }
+                         key += 1
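For reference, a minimal usage sketch for the builder added in this commit. The repository id "InstaDeepAI/plant-multi-species-genomes" is an assumption (substitute the actual dataset repo path); the config name, features, and chunk geometry follow from the script above:

from datasets import load_dataset

# hypothetical repo id -- replace with the actual dataset repository path
dataset = load_dataset(
    "InstaDeepAI/plant-multi-species-genomes",
    name="6kbp",             # default config: 6,000bp chunks + 100bp overlap on each side
    trust_remote_code=True,  # the dataset is built by this loading script
)

example = dataset["train"][0]
# each example spans at most chunk_length + 2 * overlap = 6,200bp
print(example["description"], example["start_pos"], example["end_pos"])
print(example["sequence"][:80])

As a worked example of the chunking arithmetic in `_generate_examples`: for a cleaned chromosome of 1,000,000bp with chunk_length = 6,000 and overlap = 100, num_chunks = (1,000,000 - 200) // 6,000 = 166, the sequence is trimmed to 6,000 * 166 + 200 = 996,200bp, and the last chunk runs from start_pos 990,000 to end_pos 996,200, i.e. 6,200bp like every other chunk.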