Revert "Convert dataset to Parquet"

#7
by albertvillanova - opened
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: Arabic Speech Corpus
 annotations_creators:
 - expert-generated
 language_creators:
@@ -9,6 +10,7 @@ license:
 - cc-by-4.0
 multilinguality:
 - monolingual
+paperswithcode_id: arabic-speech-corpus
 size_categories:
 - 1K<n<10K
 source_datasets:
@@ -16,10 +18,22 @@ source_datasets:
 task_categories:
 - automatic-speech-recognition
 task_ids: []
-paperswithcode_id: arabic-speech-corpus
-pretty_name: Arabic Speech Corpus
+train-eval-index:
+- config: clean
+  task: automatic-speech-recognition
+  task_id: speech_recognition
+  splits:
+    train_split: train
+    eval_split: test
+  col_mapping:
+    file: path
+    text: text
+  metrics:
+  - type: wer
+    name: WER
+  - type: cer
+    name: CER
 dataset_info:
-  config_name: clean
   features:
   - name: file
     dtype: string
@@ -33,38 +47,16 @@ dataset_info:
     dtype: string
   - name: orthographic
     dtype: string
+  config_name: clean
   splits:
   - name: train
-    num_bytes: 1527815416.966
+    num_bytes: 1002365
     num_examples: 1813
   - name: test
-    num_bytes: 99851729.0
+    num_bytes: 65784
    num_examples: 100
-  download_size: 1347643373
-  dataset_size: 1627667145.966
-configs:
-- config_name: clean
-  data_files:
-  - split: train
-    path: clean/train-*
-  - split: test
-    path: clean/test-*
-  default: true
-train-eval-index:
-- config: clean
-  task: automatic-speech-recognition
-  task_id: speech_recognition
-  splits:
-    train_split: train
-    eval_split: test
-  col_mapping:
-    file: path
-    text: text
-  metrics:
-  - type: wer
-    name: WER
-  - type: cer
-    name: CER
 ---
 
 # Dataset Card for Arabic Speech Corpus
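The restored `train-eval-index` block above declares WER and CER as the evaluation metrics for the speech-recognition task. A minimal sketch of computing those two metrics locally, assuming the `evaluate` package is installed (the reference and prediction strings here are illustrative, not taken from this dataset):

```python
import evaluate

# Word error rate and character error rate, the two metrics declared
# in the train-eval-index metadata above.
wer = evaluate.load("wer")
cer = evaluate.load("cer")

references = ["huwa qad dhahaba"]   # illustrative transcripts
predictions = ["huwa qad dhahab"]

print(wer.compute(references=references, predictions=predictions))
print(cer.compute(references=references, predictions=predictions))
```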
arabic_speech_corpus.py ADDED
@@ -0,0 +1,145 @@
+# coding=utf-8
+# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Arabic Speech Corpus"""
+
+
+import os
+
+import datasets
+from datasets.tasks import AutomaticSpeechRecognition
+
+
+_CITATION = """\
+@phdthesis{halabi2016modern,
+  title={Modern standard Arabic phonetics for speech synthesis},
+  author={Halabi, Nawar},
+  year={2016},
+  school={University of Southampton}
+}
+"""
+
+_DESCRIPTION = """\
+This speech corpus was developed as part of PhD work carried out by Nawar Halabi at the University of Southampton.
+The corpus was recorded in south Levantine Arabic
+(Damascian accent) using a professional studio. Synthesized speech generated from this corpus has produced a high-quality, natural voice.
+Note that in order to limit the required storage for preparing this dataset, the audio
+is not converted to a float32 array when the dataset is prepared. To convert an audio
+file to a float32 array, please make use of the `.map()` function as follows:
+
+
+```python
+import soundfile as sf
+
+def map_to_array(batch):
+    speech_array, _ = sf.read(batch["file"])
+    batch["speech"] = speech_array
+    return batch
+
+dataset = dataset.map(map_to_array, remove_columns=["file"])
+```
+"""
+
+_URL = "http://en.arabicspeechcorpus.com/arabic-speech-corpus.zip"
+
+
+class ArabicSpeechCorpusConfig(datasets.BuilderConfig):
+    """BuilderConfig for ArabicSpeechCorpus."""
+
+    def __init__(self, **kwargs):
+        """
+        Args:
+          data_dir: `string`, the path to the folder containing the files in the
+            downloaded .zip
+          citation: `string`, citation for the data set
+          url: `string`, url for information about the data set
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super(ArabicSpeechCorpusConfig, self).__init__(version=datasets.Version("2.1.0", ""), **kwargs)
+
+
+class ArabicSpeechCorpus(datasets.GeneratorBasedBuilder):
+    """ArabicSpeechCorpus dataset."""
+
+    BUILDER_CONFIGS = [
+        ArabicSpeechCorpusConfig(name="clean", description="'Clean' speech."),
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "file": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "audio": datasets.Audio(sampling_rate=48_000),
+                    "phonetic": datasets.Value("string"),
+                    "orthographic": datasets.Value("string"),
+                }
+            ),
+            supervised_keys=("file", "text"),
+            homepage=_URL,
+            citation=_CITATION,
+            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
+        )
+
+    def _split_generators(self, dl_manager):
+        archive_path = dl_manager.download_and_extract(_URL)
+        archive_path = os.path.join(archive_path, "arabic-speech-corpus")
+        return [
+            datasets.SplitGenerator(name="train", gen_kwargs={"archive_path": archive_path}),
+            datasets.SplitGenerator(name="test", gen_kwargs={"archive_path": os.path.join(archive_path, "test set")}),
+        ]
+
+    def _generate_examples(self, archive_path):
+        """Generate examples from an Arabic Speech Corpus archive_path."""
+        lab_dir = os.path.join(archive_path, "lab")
+        wav_dir = os.path.join(archive_path, "wav")
+        if "test set" in archive_path:
+            phonetic_path = os.path.join(archive_path, "phonetic-transcript.txt")
+        else:
+            # the train-set transcript filename is misspelled in the upstream archive
+            phonetic_path = os.path.join(archive_path, "phonetic-transcipt.txt")
+
+        orthographic_path = os.path.join(archive_path, "orthographic-transcript.txt")
+
+        phonetics = {}
+        orthographics = {}
+
+        # transcript lines carry two quoted fields: "waveform-name" "transcript"
+        with open(phonetic_path, "r", encoding="utf-8") as f:
+            for line in f:
+                wav_file, phonetic = line.split('"')[1::2]
+                phonetics[wav_file] = phonetic
+
+        with open(orthographic_path, "r", encoding="utf-8") as f:
+            for line in f:
+                wav_file, orthographic = line.split('"')[1::2]
+                orthographics[wav_file] = orthographic
+
+        for _id, lab_name in enumerate(sorted(os.listdir(lab_dir))):
+            lab_path = os.path.join(lab_dir, lab_name)
+            with open(lab_path, "r", encoding="utf-8") as f:
+                lab_text = f.read()
+
+            wav_name = lab_name[:-4] + ".wav"
+            wav_path = os.path.join(wav_dir, wav_name)
+
+            example = {
+                "file": wav_path,
+                "audio": wav_path,
+                "text": lab_text,
+                "phonetic": phonetics[wav_name],
+                "orthographic": orthographics[wav_name],
+            }
+            yield str(_id), example
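For context, a minimal usage sketch of the restored script-based loader. This assumes a `datasets` release that still executes loading scripts (recent 2.x versions require `trust_remote_code=True`, and 3.x removes script support altogether); the printed fields are the ones defined in `_info` above:

```python
from datasets import load_dataset

# Downloads and extracts the ~1.2 GB zip from en.arabicspeechcorpus.com,
# then runs the ArabicSpeechCorpus builder defined above.
ds = load_dataset("arabic_speech_corpus", "clean", trust_remote_code=True)

sample = ds["train"][0]
print(sample["orthographic"])            # orthographic transcript
print(sample["phonetic"])                # phonetic transcript
print(sample["audio"]["sampling_rate"])  # 48000; decoding is handled by datasets.Audio
```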
clean/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:bd5ea889532615c4ca9c63b5b83fc3bacb94e9fa156c26f5963b8da2c8e87768
-size 90899095

clean/train-00000-of-00004.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9c3f2931ab19224daf55126c1cf96ff068f3ad442d760c1f5db99805d5a290be
-size 398895011

clean/train-00001-of-00004.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d02e7e7d080082d1d96929b83e19b924d7c10c8b59a39f190c373245559ea36d
-size 322764456

clean/train-00002-of-00004.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8ea3385f7d8496bf1e77d9b1a2696fb2bb3769e1ffa060e43fa4fc6c5e25cf06
-size 291793854

clean/train-00003-of-00004.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:91bbec3487d3ba745113c5869be40a6008ef815b9681fe683cf7ab46dd06efcf
-size 243290957
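The deleted files above are Git LFS pointers rather than the Parquet payloads themselves: each pointer records the spec version, a sha256 `oid`, and the byte `size` of the real object. A small sketch of verifying a downloaded payload against one of these pointers; the helper function is illustrative, not part of the PR:

```python
import hashlib

def verify_lfs_pointer(pointer_text: str, payload_path: str) -> bool:
    """Check a local file against a Git LFS pointer (version/oid/size lines)."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    sha256 = hashlib.sha256()
    actual_size = 0
    with open(payload_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            sha256.update(chunk)
            actual_size += len(chunk)
    return sha256.hexdigest() == expected_oid and actual_size == expected_size

# Pointer contents taken verbatim from the deleted test-split file above.
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:bd5ea889532615c4ca9c63b5b83fc3bacb94e9fa156c26f5963b8da2c8e87768
size 90899095
"""
print(verify_lfs_pointer(pointer, "clean/test-00000-of-00001.parquet"))
```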