jslin09 committed on
Commit
e862c70
1 Parent(s): 9512113

Delete wikipedia_tw.py

Files changed (1)
  1. wikipedia_tw.py +0 -99
wikipedia_tw.py DELETED
@@ -1,99 +0,0 @@
- import json
- import datasets
- from sklearn.model_selection import train_test_split
- import pandas as pd
- import os
-
- logger = datasets.logging.get_logger(__name__)
-
- _CITATION = """\
- @article{2016arXiv160605250R,
-     author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
-               Konstantin and {Liang}, Percy},
-     title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
-     journal = {arXiv e-prints},
-     year = 2016,
-     eid = {arXiv:1606.05250},
-     pages = {arXiv:1606.05250},
-     archivePrefix = {arXiv},
-     eprint = {1606.05250},
- }
- """
-
- _DESCRIPTION = """\
- Parse the packaged Wikipedia bz2 dump file, extract the needed content, and use wikitextparser to strip the wiki markup.\
- This data file was parsed from the 20230701 Traditional Chinese Wikipedia dump.\
- Two fields are kept after parsing: the article title (title) and the article content (article).\
-
- The original dump mixes Simplified and Traditional Chinese, so OpenCC was used to convert Simplified characters to Traditional.\
-
- Total articles in the original dump: 4,296,654.\
- Article titles extracted: 4,296,249.\
- Article contents extracted: 4,296,249.\
- Articles whose markup could not be stripped automatically: 5,415\
- Articles with content: 4,296,249
- """
-
- _URL = "https://huggingface.co/datasets/jslin09/wikipedia_tw"
- #_URLS = {
- #    "train": _URL + "train-v1.1.json",
- #    "dev": _URL + "dev-v1.1.json",
- #}
-
- class WikipediaConfig(datasets.BuilderConfig):
-     """BuilderConfig for Wikipedia_tw."""
-     def __init__(self, **kwargs):
-         """BuilderConfig for Wikipedia_tw.
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(WikipediaConfig, self).__init__(**kwargs)
-
- class Wikipedia_tw(datasets.GeneratorBasedBuilder):
-     """Wikipedia_tw: The Wikipedia Dataset in Traditional Chinese plain text. Version 1.0."""
-
-     BUILDER_CONFIGS = [
-         WikipediaConfig(
-             name="plain_text",
-             version=datasets.Version("1.0.0", ""),
-             description="The Wikipedia in Traditional Chinese plain text.",
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "title": datasets.Value("string"),
-                     "article": datasets.Value("string")
-                 }
-             ),
-             # No default supervised_keys (the dataset provides plain
-             # title/article pairs, not a supervised task).
-             supervised_keys=None,
-             homepage="https://huggingface.co/datasets/jslin09/wikipedia_tw",
-             # citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         downloaded_files = dl_manager.download_and_extract(_URL)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files}),
-             # datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-         ]
-
-     def _generate_examples(self, filepath):
-         logger.info("generating examples from = %s", filepath)
-         key = 0
-         with open(filepath, 'r', encoding='UTF-8') as f:
-             wikipedia = json.load(f, strict=False)
-             for page_index in range(len(wikipedia)):
-                 title = wikipedia[page_index]["title"]
-                 article = wikipedia[page_index]["article"]
-                 yield key, {
-                     "title": title,
-                     "article": article,
-                 }
-                 key += 1
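
The deleted script's _DESCRIPTION says the dump was cleaned with wikitextparser (markup removal) and OpenCC (Simplified-to-Traditional conversion), but that preprocessing code is not part of this file. Below is a minimal sketch of what that step could look like; the clean_page helper, the sample input, and the "s2t" conversion profile are assumptions, not the author's actual pipeline.

import wikitextparser as wtp
from opencc import OpenCC

# Simplified -> Traditional converter; the "s2t" profile is an assumption.
cc = OpenCC("s2t")

def clean_page(raw_wikitext: str) -> str:
    """Strip wiki markup, then convert the plain text to Traditional Chinese."""
    plain = wtp.parse(raw_wikitext).plain_text()
    return cc.convert(plain)

if __name__ == "__main__":
    sample = "'''维基百科'''是一个[[自由内容|自由內容]]的百科全书。"
    print(clean_page(sample))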
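
For reading the published dataset itself, a minimal usage sketch follows. Since this commit deletes the loading script, load_dataset would rely on the data files hosted in the jslin09/wikipedia_tw repository; the "train" split name and the "title"/"article" columns are assumptions carried over from the deleted script's features.

from datasets import load_dataset

# Load the Traditional Chinese Wikipedia dataset from the Hugging Face Hub.
# Split name and column names are assumptions based on the deleted script.
ds = load_dataset("jslin09/wikipedia_tw", split="train")

print(ds[0]["title"])          # article title
print(ds[0]["article"][:200])  # first 200 characters of the article body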