j-krzywdziak committed
Commit dca3754 · 1 Parent(s): 5aeb5cb

Update test.py

Files changed (1): test.py +119 -262
test.py CHANGED
@@ -1,262 +1,119 @@
- # coding=utf-8
- # Lint as: python3
- """test set"""
-
-
- import csv
- import os
- import json
-
- import datasets
- from datasets.utils.py_utils import size_str
- from tqdm import tqdm
-
-
- _CITATION = """\
- @inproceedings{panayotov2015librispeech,
-     title={Librispeech: an ASR corpus based on public domain audio books},
-     author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
-     booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
-     pages={5206--5210},
-     year={2015},
-     organization={IEEE}
- }
- """
-
- _DESCRIPTION = """\
- Lorem ipsum
- """
-
- _BASE_URL = "https://huggingface.co/datasets/j-krzywdziak/test/tree/main"
- _AUDIO_URL = _BASE_URL + "dev.tar.gz"
- _TRANSCRIPT_URL = _BASE_URL + "dev.tsv"
-
- # _DL_URLS = {
- #     "clean": {
- #         "dev": _DL_URL + "dev-clean.tar.gz",
- #         "test": _DL_URL + "test-clean.tar.gz",
- #         "train.100": _DL_URL + "train-clean-100.tar.gz",
- #         "train.360": _DL_URL + "train-clean-360.tar.gz",
- #     },
- #     "other": {
- #         "test": _DL_URL + "test-other.tar.gz",
- #         "dev": _DL_URL + "dev-other.tar.gz",
- #         "train.500": _DL_URL + "train-other-500.tar.gz",
- #     },
- #     "all": {
- #         "dev.clean": _DL_URL + "dev-clean.tar.gz",
- #         "dev.other": _DL_URL + "dev-other.tar.gz",
- #         "test.clean": _DL_URL + "test-clean.tar.gz",
- #         "test.other": _DL_URL + "test-other.tar.gz",
- #         "train.clean.100": _DL_URL + "train-clean-100.tar.gz",
- #         "train.clean.360": _DL_URL + "train-clean-360.tar.gz",
- #         "train.other.500": _DL_URL + "train-other-500.tar.gz",
- #     },
- # }
-
- logger = datasets.logging.get_logger(__name__)
-
-
- class TestASRConfig(datasets.BuilderConfig):
-     """BuilderConfig for TestASR."""
-
-     def __init__(self, **kwargs):
-         """
-         Args:
-           data_dir: `string`, the path to the folder containing the files in the
-             downloaded .tar
-           citation: `string`, citation for the data set
-           url: `string`, url for information about the data set
-           **kwargs: keyword arguments forwarded to super.
-         """
-         super(TestASRConfig, self).__init__(**kwargs)
-
-
- class TestASR(datasets.GeneratorBasedBuilder):
-     """Test dataset."""
-     #
-     # DEFAULT_WRITER_BATCH_SIZE = 256
-     # DEFAULT_CONFIG_NAME = "all"
-     # BUILDER_CONFIGS = [
-     #     LibrispeechASRConfig(name="clean", description="'Clean' speech."),
-     #     LibrispeechASRConfig(name="other", description="'Other', more challenging, speech."),
-     #     LibrispeechASRConfig(name="all", description="Combined clean and other dataset."),
-     # ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "audio_id": datasets.Value("string"),
-                     "audio": datasets.Audio(sampling_rate=16_000),
-                     "ngram": datasets.Value("string")
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_BASE_URL,
-             citation=_CITATION
-         )
-
-     def _split_generators(self, dl_manager):
-         archive_path = dl_manager.download(_AUDIO_URL)
-         # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
-         local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
-         meta_path = dl_manager.download(_TRANSCRIPT_URL)
-         return [datasets.SplitGenerator(
-             name=datasets.Split.TRAIN,
-             gen_kwargs={
-                 "audio_id": meta_path,
-                 "ngram": meta_path,
-                 "audio": local_extracted_archive
-             }
-         )]
-         #
-         # if self.config.name == "clean":
-         #     train_splits = [
-         #         datasets.SplitGenerator(
-         #             name="train.100",
-         #             gen_kwargs={
-         #                 "local_extracted_archive": local_extracted_archive.get("train.100"),
-         #                 "files": dl_manager.iter_archive(archive_path["train.100"]),
-         #             },
-         #         ),
-         #         datasets.SplitGenerator(
-         #             name="train.360",
-         #             gen_kwargs={
-         #                 "local_extracted_archive": local_extracted_archive.get("train.360"),
-         #                 "files": dl_manager.iter_archive(archive_path["train.360"]),
-         #             },
-         #         ),
-         #     ]
-         #     dev_splits = [
-         #         datasets.SplitGenerator(
-         #             name=datasets.Split.VALIDATION,
-         #             gen_kwargs={
-         #                 "local_extracted_archive": local_extracted_archive.get("dev"),
-         #                 "files": dl_manager.iter_archive(archive_path["dev"]),
-         #             },
-         #         )
-         #     ]
-         #     test_splits = [
-         #         datasets.SplitGenerator(
-         #             name=datasets.Split.TEST,
-         #             gen_kwargs={
-         #                 "local_extracted_archive": local_extracted_archive.get("test"),
-         #                 "files": dl_manager.iter_archive(archive_path["test"]),
-         #             },
-         #         )
-         #     ]
-         # elif self.config.name == "other":
-         #     train_splits = [
-         #         datasets.SplitGenerator(
-         #             name="train.500",
-         #             gen_kwargs={
-         #                 "local_extracted_archive": local_extracted_archive.get("train.500"),
-         #                 "files": dl_manager.iter_archive(archive_path["train.500"]),
-         #             },
-         #         )
-         #     ]
-         #     dev_splits = [
-         #         datasets.SplitGenerator(
-         #             name=datasets.Split.VALIDATION,
-         #             gen_kwargs={
-         #                 "local_extracted_archive": local_extracted_archive.get("dev"),
-         #                 "files": dl_manager.iter_archive(archive_path["dev"]),
-         #             },
-         #         )
-         #     ]
-         #     test_splits = [
-         #         datasets.SplitGenerator(
-         #             name=datasets.Split.TEST,
-         #             gen_kwargs={
-         #                 "local_extracted_archive": local_extracted_archive.get("test"),
-         #                 "files": dl_manager.iter_archive(archive_path["test"]),
-         #             },
-         #         )
-         #     ]
-         # elif self.config.name == "all":
-         #     train_splits = [
-         #         datasets.SplitGenerator(
-         #             name="train.clean.100",
-         #             gen_kwargs={
-         #                 "local_extracted_archive": local_extracted_archive.get("train.clean.100"),
-         #                 "files": dl_manager.iter_archive(archive_path["train.clean.100"]),
-         #             },
-         #         ),
-         #         datasets.SplitGenerator(
-         #             name="train.clean.360",
-         #             gen_kwargs={
-         #                 "local_extracted_archive": local_extracted_archive.get("train.clean.360"),
-         #                 "files": dl_manager.iter_archive(archive_path["train.clean.360"]),
-         #             },
-         #         ),
-         #         datasets.SplitGenerator(
-         #             name="train.other.500",
-         #             gen_kwargs={
-         #                 "local_extracted_archive": local_extracted_archive.get("train.other.500"),
-         #                 "files": dl_manager.iter_archive(archive_path["train.other.500"]),
-         #             },
-         #         ),
-         #     ]
-         #     dev_splits = [
-         #         datasets.SplitGenerator(
-         #             name="validation.clean",
-         #             gen_kwargs={
-         #                 "local_extracted_archive": local_extracted_archive.get("dev.clean"),
-         #                 "files": dl_manager.iter_archive(archive_path["dev.clean"]),
-         #             },
-         #         ),
-         #         datasets.SplitGenerator(
-         #             name="validation.other",
-         #             gen_kwargs={
-         #                 "local_extracted_archive": local_extracted_archive.get("dev.other"),
-         #                 "files": dl_manager.iter_archive(archive_path["dev.other"]),
-         #             },
-         #         ),
-         #     ]
-         #     test_splits = [
-         #         datasets.SplitGenerator(
-         #             name="test.clean",
-         #             gen_kwargs={
-         #                 "local_extracted_archive": local_extracted_archive.get("test.clean"),
-         #                 "files": dl_manager.iter_archive(archive_path["test.clean"]),
-         #             },
-         #         ),
-         #         datasets.SplitGenerator(
-         #             name="test.other",
-         #             gen_kwargs={
-         #                 "local_extracted_archive": local_extracted_archive.get("test.other"),
-         #                 "files": dl_manager.iter_archive(archive_path["test.other"]),
-         #             },
-         #         ),
-         #     ]
-
-         #return train_splits + dev_splits + test_splits
-
-     def _generate_examples(self, meta_path, local_extracted_archive):
-         """Generate examples from a LibriSpeech archive_path."""
-         data_fields = list(self._info().features.keys())
-         metadata = {}
-         with open(meta_path, encoding="utf-8") as f:
-             reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-             for row in tqdm(reader, desc="Reading metadata..."):
-                 if not row["audio_id"].endswith(".mp3"):
-                     row["audio_id"] += ".mp3"
-                 for field in data_fields:
-                     if field not in row:
-                         row[field] = ""
-                 metadata[row["path"]] = row
-
-         for filename, file in local_extracted_archive:
-             _, filename = os.path.split(filename)
-             if filename in metadata:
-                 result = dict(metadata[filename])
-                 # set the audio feature and the path to the extracted file
-                 path = os.path.join(local_extracted_archive,
-                                     filename) if local_extracted_archive else filename
-                 result["audio"] = {"path": path, "bytes": file.read()}
-                 # set path to None if the audio file doesn't exist locally (i.e. in streaming mode)
-                 result["path"] = path if local_extracted_archive else filename
-
-                 yield path, result
+ # coding=utf-8
+ # Lint as: python3
+ """test set"""
+
+
+ import csv
+ import os
+ import json
+
+ import datasets
+ from datasets.utils.py_utils import size_str
+ from tqdm import tqdm
+
+
+ _CITATION = """\
+ @inproceedings{panayotov2015librispeech,
+     title={Librispeech: an ASR corpus based on public domain audio books},
+     author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
+     booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
+     pages={5206--5210},
+     year={2015},
+     organization={IEEE}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Lorem ipsum
+ """
+
+ _BASE_URL = "https://huggingface.co/datasets/j-krzywdziak/test"
+ # Direct downloads need the resolve/ endpoint (tree/ only renders the file
+ # browser) and an explicit path separator:
+ _DATA_URL = _BASE_URL + "/resolve/main/dev.tar.gz"
+ _PROMPTS_URLS = _BASE_URL + "/resolve/main/dev.tsv"
+
+ logger = datasets.logging.get_logger(__name__)
+
+ class TestConfig(datasets.BuilderConfig):
+     """Lorem ipsum."""
+
+     def __init__(self, name, version, **kwargs):
+         # self.language = kwargs.pop("language", None)
+         # self.release_date = kwargs.pop("release_date", None)
+         # self.num_clips = kwargs.pop("num_clips", None)
+         # self.num_speakers = kwargs.pop("num_speakers", None)
+         # self.validated_hr = kwargs.pop("validated_hr", None)
+         # self.total_hr = kwargs.pop("total_hr", None)
+         # self.size_bytes = kwargs.pop("size_bytes", None)
+         # self.size_human = size_str(self.size_bytes)
+         description = (
+             "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor "
+             "incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud "
+             "exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure "
+             "dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. "
+             "Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt "
+             "mollit anim id est laborum."
+         )
+         super(TestConfig, self).__init__(
+             name=name,
+             version=datasets.Version(version),
+             description=description,
+             **kwargs,
+         )
+
+ class TestASR(datasets.GeneratorBasedBuilder):
+     """Lorem ipsum."""
+     # Must match the name of one of the configs in BUILDER_CONFIGS.
+     DEFAULT_CONFIG_NAME = "Test Dataset"
+
+     BUILDER_CONFIGS = [
+         TestConfig(
+             name="Test Dataset",
+             version="0.0.0",
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "audio_id": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "ngram": datasets.Value("string")
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_BASE_URL,
+             citation=_CITATION
+         )
+
+     def _split_generators(self, dl_manager):
+         archive_path = dl_manager.download(_DATA_URL)
+         meta_path = dl_manager.download(_PROMPTS_URLS)
+         # iter_archive yields (path, file) pairs straight from the downloaded
+         # tar, so the same code works in streaming and non-streaming mode; the
+         # gen_kwargs keys must match the _generate_examples parameter names.
+         return [datasets.SplitGenerator(
+             name=datasets.Split.TEST,
+             gen_kwargs={
+                 "meta_path": meta_path,
+                 "audio_files": dl_manager.iter_archive(archive_path)
+             }
+         )]
+
+     def _generate_examples(self, meta_path, audio_files):
+         """Lorem ipsum."""
+         metadata = {}
+         with open(meta_path, encoding="utf-8") as f:
+             for row in f:
+                 # Each dev.tsv row holds two tab-separated fields: audio_id, ngram.
+                 row = row.rstrip("\n")
+                 if not row:
+                     continue
+                 audio_id = row.split("\t")[0]
+                 ngram = row.split("\t")[1]
+                 metadata[audio_id] = {"audio_id": audio_id,
+                                       "ngram": ngram}
+
+         id_ = 0
+         for path, f in audio_files:
+             _, audio_name = os.path.split(path)
+             if audio_name in metadata:
+                 audio = {"path": path, "bytes": f.read()}
+                 yield id_, {**metadata[audio_name], "audio": audio}
+                 id_ += 1
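
A quick way to smoke-test the updated loader — a minimal sketch, assuming dev.tar.gz and dev.tsv are actually downloadable at the URLs above and the script is saved locally as test.py; the config name "Test Dataset" and the features come from BUILDER_CONFIGS and _info() in this diff:

# sanity_check.py — hypothetical helper, not part of the commit
from datasets import load_dataset

# Builds the single "Test Dataset" config and materializes the TEST split.
ds = load_dataset("./test.py", "Test Dataset", split="test")

# Each example carries the TSV metadata plus audio decoded lazily at 16 kHz.
print(ds[0]["audio_id"], ds[0]["ngram"])
print(ds[0]["audio"]["sampling_rate"])

This assumes each dev.tsv row is audio_id then ngram, tab-separated, with audio_id matching a file name inside dev.tar.gz; rows that do not match any archive member are silently skipped by _generate_examples.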