conceptofmind committed on
Commit d44ae80 · verified · 1 Parent(s): 227b186

Delete megawika.py

Files changed (1)
  1. megawika.py (+0, -197)
megawika.py DELETED
@@ -1,197 +0,0 @@
"""MegaWika dataset loading script for HuggingFace Datasets."""

import csv
import json
import os
import re
import pathlib
from pathlib import Path
import yaml
from ast import literal_eval
import urllib.request
import datasets

_CITATION = """\
@article{barham2023megawika,
  title={MegaWika: Millions of reports and their sources across 50 diverse languages},
  author={Barham, Samuel and Weller, Orion and
          Yuan, Michelle and Murray, Kenton and
          Yarmohammadi, Mahsa and Jiang, Zhengping and
          Vashishtha, Siddharth and Martin, Alexander and
          Liu, Anqi and White, Aaron Steven and
          Boyd-Graber, Jordan and Van Durme, Benjamin
  },
  journal={INSERT ARXIV PREPRINT ID HERE},
  year={2023}
}
"""

_DESCRIPTION = """\
MegaWika is a multi- and crosslingual text dataset containing 30 million
Wikipedia passages with their scraped and cleaned web citations across 50 languages.
"""

_HOMEPAGE = "https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset"
_LICENSE = "cc-by-sa-4.0"
_URL = "https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset"

def load_file_paths():
    """Load and parse the files.yml containing dataset file paths.

    Expected YAML structure:
        en:
          - https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset/resolve/main/en/en-00000-of-06154.jsonl
        fr:
          - https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset/resolve/main/fr/fr-00000-of-00123.jsonl
        ...

    Returns:
        dict: Dictionary mapping language codes to lists of file URLs
    """
    file_list_url = "https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset/raw/main/files.yml"
    try:
        with urllib.request.urlopen(file_list_url) as f:
            # Direct YAML parsing - the structure is already in the correct format
            return yaml.safe_load(f)
    except (yaml.YAMLError, urllib.error.URLError) as exc:
        print(f"Error loading dataset file paths: {exc}")
        return {}

class MegaWikaConfig(datasets.BuilderConfig):
    """BuilderConfig for MegaWika."""

    def __init__(self, language=None, **kwargs):
        """BuilderConfig for MegaWika.

        Args:
            language: Language identifier for the dataset split
            **kwargs: Keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.language = language

class MegaWika(datasets.GeneratorBasedBuilder):
    """MegaWika dataset."""

    VERSION = datasets.Version("1.0.0")

    # Load available languages directly from YAML structure
    LANGUAGES = list(load_file_paths().keys())

    BUILDER_CONFIGS = ([
        MegaWikaConfig(
            name="all",
            language=None,
            version=VERSION,
            description="Complete MegaWika dataset across all languages",
        )
    ] + [
        MegaWikaConfig(
            name=lang,
            language=lang,
            version=VERSION,
            description=f"MegaWika dataset for {lang} language",
        )
        for lang in LANGUAGES
    ])

    DEFAULT_CONFIG_NAME = "all"

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Load the file paths afresh to ensure we have the latest data
        data_sources = load_file_paths()

        if self.config.name == "all":
            # Process all languages
            selected_sources = data_sources
        else:
            # Process single language
            if self.config.name not in data_sources:
                raise ValueError(
                    f"Language '{self.config.name}' not found in available languages: {list(data_sources.keys())}"
                )
            selected_sources = {self.config.name: data_sources[self.config.name]}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": dl_manager.download(urls),
                    "language": lang
                }
            )
            for lang, urls in selected_sources.items()
        ]

    def _get_qa_pair_list_features(self, qa_pair, feature_name):
        """Helper function to extract QA pair features."""
        if feature_name in qa_pair and qa_pair[feature_name]:
            return qa_pair[feature_name]
        elif feature_name.startswith('en'):
            base_feature = '_'.join(feature_name.split('_')[1:])
            if base_feature in qa_pair and qa_pair[base_feature]:
                return qa_pair[base_feature]
        return []

    def _generate_examples(self, filepaths, language):
        """Yields examples."""
        _id = 0
        for filepath in filepaths:
            try:
                with open(filepath, "r", encoding="utf-8") as f:
                    for line in f:
                        if line.strip():
                            example = json.loads(line)
                            if isinstance(example, dict):
                                yield _id, {
                                    "article_title": example.get("article_title", ""),
                                    "article_text": example.get("article_text", ""),
                                    "entries": [
                                        {
                                            "id": entry.get("id", "").lower(),
                                            "passage": {
                                                "text": entry['passage'].get("text", []),
                                                "parse": json.dumps(entry['passage'].get("parse", [{}])),
                                                "en_tokens": list(entry['passage'].get("en_tokens", {}).values()),
                                                "lang_tokens": list(entry['passage'].get("lang_tokens", {}).values()),
                                                "en_lang_token_map": [
                                                    [int(k), int(v)] for k, v in
                                                    entry['passage'].get("en_lang_token_map", {}).items()
                                                ]
                                            },
                                            "mt": {
                                                "original": entry.get("original", ""),
                                                "original_sents": entry.get("original_sents", []),
                                                "translation": entry.get("translation", ""),
                                                "translation_sents": entry.get("translation_sents", []),
                                                "translation_probs": entry.get("translation_probs", [[]]),
                                                "repetitious_translation": entry.get("repetitious_translation", False)
                                            },
                                            "source_lang": entry.get("source_lang", ""),
                                            "source_url": entry.get("source_url", ""),
                                            "source_text": entry.get("source_text", ""),
                                            "qa_pairs": [
                                                {
                                                    "question": qa_pair.get('question', ""),
                                                    "en_answer": qa_pair.get('en_answer', qa_pair.get('answer', "")),
                                                    'lang_answer': qa_pair.get('lang_answer', ''),
                                                    'frames': qa_pair.get('frames', []),
                                                    "en_matches_in_source": self._get_qa_pair_list_features(qa_pair, "en_matches_in_source"),
                                                    "en_match_in_passage": self._get_qa_pair_list_features(qa_pair, "en_match_in_passage"),
                                                    "lang_matches_in_source": self._get_qa_pair_list_features(qa_pair, "lang_matches_in_source"),
                                                    "lang_match_in_passage": self._get_qa_pair_list_features(qa_pair, "lang_match_in_passage"),
                                                    "passage": qa_pair.get('passage', []),
                                                    "en_answer_tokens": qa_pair.get('en_answer_tokens', qa_pair.get('answer_tokens', [])),
                                                    "match_disambiguated_question": qa_pair.get('match_disambiguated_question', ""),
                                                }
                                                for qa_pair in entry.get('qa_pairs', [])
                                            ]
                                        }
                                        for entry in example.get("entries", [])
                                    ]
                                }
                                _id += 1
            except Exception as e:
                print(f"Error reading file {filepath}: {str(e)}")
                continue
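
For reference, below is a minimal usage sketch of how this loading script would have been invoked through the standard datasets API while it was still present in the repository. It assumes the repo id from _URL above, that "all" was the default config, and that a per-language config such as "en" exists in files.yml; depending on the installed datasets version, executing a hub-hosted script may also require trust_remote_code=True.

# Minimal usage sketch (assumptions: repo id from _URL above, an "en" entry in
# files.yml; newer datasets releases may additionally require
# trust_remote_code=True to run a hub-hosted loading script).
from datasets import load_dataset

# Default config: every language listed in files.yml
ds_all = load_dataset("DataProvenanceInitiative/Megawika_subset", "all", split="train")

# Single-language config, e.g. English
ds_en = load_dataset("DataProvenanceInitiative/Megawika_subset", "en", split="train")

print(ds_en[0]["article_title"])

After this deletion, the same repository would instead be read through the library's generic JSON/JSONL data-file loader rather than this script, so the config names above apply only to the script-based version.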