clean
wikipedia_html_enterprise.py (+29 -67)
@@ -17,18 +17,9 @@
 """Wikipedia dataset containing cleaned articles of all languages."""
 
 
-import bz2
-import codecs
 import json
-import re
-import xml.etree.cElementTree as etree
-from urllib.parse import quote
-import mwparserfromhell
-from multiprocess import Process, Manager
 from tqdm import tqdm
-import multiprocessing
 import datasets
-from functools import partial
 from pathlib import Path
 
 logger = datasets.logging.get_logger(__name__)
@@ -38,19 +29,13 @@ _CITATION = """"""
 
 _DESCRIPTION = """"""
 
-_LICENSE = (
-    "This work is licensed under the Creative Commons Attribution-ShareAlike "
-    "3.0 Unported License. To view a copy of this license, visit "
-    "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
-    "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
-)
-
-_INFO_FILE = "dumpstatus.json"
+_LICENSE = """"""
 
 
 _VERSION = datasets.Version("2.0.0", "")
 _NUM_SPLITS = 68
 
+
 class WikipediaConfig(datasets.BuilderConfig):
     """BuilderConfig for Wikipedia."""
 
@@ -102,15 +87,17 @@ class Wikipedia(datasets.GeneratorBasedBuilder):
                     "identifier": datasets.Value("string"),
                     "url": datasets.Value("string"),
                 },
-                "is_part_of"
+                "is_part_of": {
                     "name": datasets.Value("string"),
                     "identifier": datasets.Value("string"),
                 },
-                "license":[
-
-
-
-
+                "license": [
+                    {
+                        "name": datasets.Value("string"),
+                        "url": datasets.Value("string"),
+                        "identifier": datasets.Value("string"),
+                    }
+                ],
             }
         ),
         # No default supervised_keys.
@@ -120,58 +107,33 @@ class Wikipedia(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        data_paths = [
-            Path(self.config.data_dir) / f"enwiki_{self.config.shard}.ndjson"
-        ]
+        data_paths = [Path(self.config.data_dir) / f"enwiki_{self.config.shard}.ndjson"]
         return [
             datasets.SplitGenerator(  # pylint:disable=g-complex-comprehension
                 name=datasets.Split.TRAIN, gen_kwargs={"filepaths": data_paths}
             )
         ]
 
-    def _generate_examples(
-
-
-
-
+    def _generate_examples(
+        self,
+        filepaths,
+    ):
         for filepath in filepaths:
-            with open(filepath,
+            with open(filepath, "r") as f:
                 for line in tqdm(f):
                     example = json.loads(line)
                     clean_example = {}
-                    clean_example[
-                    clean_example[
-                    clean_example[
-                    clean_example[
-                    clean_example[
+                    clean_example["name"] = example["name"]
+                    clean_example["identifier"] = example["identifier"]
+                    clean_example["date_modified"] = example["date_modified"]
+                    clean_example["namespace_name"] = example["namespace"]["name"]
+                    clean_example["namespace_identifier"] = example["namespace"]["identifier"]
                     clean_example["categories"] = example.get("categories", None)
-                    clean_example[
-                    clean_example[
-                    clean_example[
-                    clean_example[
-                    clean_example[
-                    clean_example[
-                    clean_example[
-                    yield clean_example[
-        # num_processes = 16
-        # with multiprocessing.Pool(processes=num_processes) as pool:
-
-        #     results = pool.imap_unordered(partial(parse_and_clean), filepaths)
-        #     for result in results:
-        #         for example in result:
-        #             yield example
-
-def parse_and_clean(filepath):
-    examples = []
-    with open(filepath, 'r') as f:
-        for line in tqdm(f):
-            example = json.loads(line)
-            clean_example = {}
-            clean_example['id'] = example['identifier']
-            clean_example['date_modified'] = example['date_modified']
-            clean_example['url'] = example['url']
-            clean_example['html'] = f'{example["article_body"]["html"]}'
-            clean_example['wikitext'] = example['article_body']['wikitext']
-
-            examples.append(clean_example)
-    return examples
+                    clean_example["url"] = example["url"]
+                    clean_example["html"] = f'{example["article_body"]["html"]}'
+                    clean_example["wikitext"] = example["article_body"]["wikitext"]
+                    clean_example["in_language"] = example["in_language"]
+                    clean_example["main_entity"] = example.get("main_entity", None)
+                    clean_example["is_part_of"] = example["is_part_of"]
+                    clean_example["license"] = example["license"]
+                    yield clean_example["identifier"], clean_example
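
For context, a minimal sketch of the NDJSON record shape that the rewritten _generate_examples assumes. Only the key names are taken from the code in this diff; the values and the nested shapes of categories, in_language, main_entity, is_part_of, and license are illustrative placeholders, not the actual Wikimedia Enterprise dump schema.

import json

# Illustrative record only: key names mirror what _generate_examples reads above;
# values and nested structures are assumptions, not the real Enterprise schema.
record = {
    "name": "Example article",
    "identifier": 12345,
    "date_modified": "2023-01-01T00:00:00Z",
    "url": "https://en.wikipedia.org/wiki/Example_article",
    "namespace": {"name": "Article", "identifier": 0},
    "categories": [{"name": "Category:Examples"}],
    "article_body": {"html": "<p>Example</p>", "wikitext": "Example"},
    "in_language": {"identifier": "en"},
    "main_entity": {"identifier": "Q42"},
    "is_part_of": {"name": "Wikipedia", "identifier": "enwiki"},
    "license": [
        {
            "name": "CC BY-SA 3.0",
            "url": "https://creativecommons.org/licenses/by-sa/3.0/",
            "identifier": "CC-BY-SA-3.0",
        }
    ],
}

# One line of an enwiki_<shard>.ndjson shard, parsed the same way the loader does.
line = json.dumps(record)
example = json.loads(line)
assert example["article_body"]["wikitext"] == "Example"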
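
And a hedged usage sketch for the cleaned-up script: it assumes the file is loaded as a local dataset script and that WikipediaConfig accepts shard as a config keyword (the code above reads self.config.shard and self.config.data_dir); the directory name and shard index are placeholders.

from datasets import load_dataset

# Assumes wikipedia_html_enterprise.py is in the working directory and that data_dir
# holds pre-split Enterprise HTML shards named enwiki_<shard>.ndjson.
ds = load_dataset(
    "wikipedia_html_enterprise.py",
    shard=0,                   # placeholder shard index; the script defines _NUM_SPLITS = 68
    data_dir="enwiki_shards",  # placeholder directory containing enwiki_0.ndjson, ...
    split="train",
)
print(ds[0]["identifier"], ds[0]["name"])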