ryzzlestrizzle committed on
Commit 0124852
1 Parent(s): 87310a1

Delete multi-wiki-clustering-p2p.py

Files changed (1)
  1. multi-wiki-clustering-p2p.py +0 -88
multi-wiki-clustering-p2p.py DELETED
@@ -1,88 +0,0 @@
-import gzip
-import json
-from collections.abc import Generator
-
-import datasets
-
-_LANGUAGES = {
-    "da": "Danish",
-    "lv": "Latvian",
-    "sq": "Albanian",
-    "gv": "Manx",
-}
-_ALL_LANGUAGES = "all_languages"
-_DOWNLOAD_URL = "{lang}/{split}.jsonl.gz"
-_VERSION = "1.0.0"
-_DESCRIPTION = """
-A dataset of wikipedia paragraphs and corresponding top-level categories for multilingual clustering.
-"""
-
-
-class WikiClusteringP2PConfig(datasets.BuilderConfig):
-    """BuilderConfig for AmazonReviewsMultiConfig."""
-
-    def __init__(self, languages: dict[str, str] | None = None, **kwargs):  # noqa: ANN003
-        super().__init__(version=datasets.Version(_VERSION, ""), **kwargs)
-        self.languages = languages
-
-
-class WikiClusteringP2P(datasets.GeneratorBasedBuilder):
-
-    """Wikipedia Clustering"""
-
-    BUILDER_CONFIGS = [
-        WikiClusteringP2PConfig(
-            name=_ALL_LANGUAGES,
-            languages=_LANGUAGES,
-            description="A collection of wikipedia paragraphs and category labels to aid in multilingual clustering evaluation.",
-        ),
-    ] + [
-        WikiClusteringP2PConfig(
-            name=lang,
-            languages=[lang],
-            description=f"{_LANGUAGES[lang]} articles/labels for wikipedia articles",
-        )
-        for lang in _LANGUAGES
-    ]
-    BUILDER_CONFIG_CLASS = WikiClusteringP2PConfig
-    DEFAULT_CONFIG_NAME = _ALL_LANGUAGES
-
-    def _info(self) -> datasets.DatasetInfo:
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            supervised_keys=None,
-        )
-
-    def _split_generators(
-        self,
-        dl_manager: datasets.DownloadManager,
-    ) -> list[datasets.SplitGenerator]:
-        test_urls = [
-            _DOWNLOAD_URL.format(split="test", lang=lang)
-            for lang in self.config.languages
-        ]
-
-        test_paths = dl_manager.download_and_extract(test_urls)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"file_paths": []},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={"file_paths": []},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"file_paths": test_paths},
-            ),
-        ]
-
-    def _generate_examples(self, file_paths: list[str]) -> Generator[tuple[int, dict]]:
-        row_count = 0
-        for file_path in file_paths:
-            with gzip.open(file_path, "rt", encoding="utf-8") as f:
-                for line in f:
-                    yield row_count, json.loads(line)
-                    row_count += 1
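
For reference, the deleted file was a standard script-based `datasets` loader: a `GeneratorBasedBuilder` exposing one config per language ("da", "lv", "sq", "gv") plus the default "all_languages" config, with only the test split actually populated from the per-language `{lang}/test.jsonl.gz` files. The usage sketch below shows how such a script-backed dataset would typically have been loaded; the repo id "ryzzlestrizzle/multi-wiki-clustering-p2p" is an assumption inferred from the username and filename, not something recorded in this commit.

# Minimal usage sketch (assumed repo id; script-based datasets require
# trust_remote_code=True in recent versions of the `datasets` library).
from datasets import load_dataset

ds = load_dataset(
    "ryzzlestrizzle/multi-wiki-clustering-p2p",  # assumed repo id, inferred from user/filename
    name="da",                                   # per-language config; default is "all_languages"
    split="test",                                # the script only wires files into the test split
    trust_remote_code=True,
)
print(ds[0])  # one JSON object decoded from da/test.jsonl.gz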