albertvillanova committed
Commit ac1fc9b
1 Parent(s): 1272c10

Delete loading script

Files changed (1)
  1. best2009.py +0 -138
best2009.py DELETED
@@ -1,138 +0,0 @@
- import os
- from functools import reduce
- from pathlib import Path
-
- import datasets
-
-
- _CITATION = """\
- @inproceedings{kosawat2009best,
-   title={BEST 2009: Thai word segmentation software contest},
-   author={Kosawat, Krit and Boriboon, Monthika and Chootrakool, Patcharika and Chotimongkol, Ananlada and Klaithin, Supon and Kongyoung, Sarawoot and Kriengket, Kanyanut and Phaholphinyo, Sitthaa and Purodakananda, Sumonmas and Thanakulwarapas, Tipraporn and others},
-   booktitle={2009 Eighth International Symposium on Natural Language Processing},
-   pages={83--88},
-   year={2009},
-   organization={IEEE}
- }
- @inproceedings{boriboon2009best,
-   title={Best corpus development and analysis},
-   author={Boriboon, Monthika and Kriengket, Kanyanut and Chootrakool, Patcharika and Phaholphinyo, Sitthaa and Purodakananda, Sumonmas and Thanakulwarapas, Tipraporn and Kosawat, Krit},
-   booktitle={2009 International Conference on Asian Language Processing},
-   pages={322--327},
-   year={2009},
-   organization={IEEE}
- }
- """
-
- _LICENSE = "CC-BY-NC-SA 3.0"
-
- _DESCRIPTION = """\
- `best2009` is a Thai word-tokenization dataset from encyclopedias, novels, news and articles by
- [NECTEC](https://www.nectec.or.th/) (148,995/2,252 lines of train/test). It was created for
- [BEST 2010: Word Tokenization Competition](https://thailang.nectec.or.th/archive/indexa290.html?q=node/10).
- The test set answers are not provided publicly.
- """
-
-
- class Best2009Config(datasets.BuilderConfig):
-     def __init__(self, **kwargs):
-         """BuilderConfig
-
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(Best2009Config, self).__init__(**kwargs)
-
-
- class Best2009(datasets.GeneratorBasedBuilder):
-
-     _DOWNLOAD_URL = "https://archive.org/download/best_dataset/data.zip"
-     _TRAIN_FOLDER = "train"
-     _TEST_FOLDER = "test"
-
-     _USELESS_TAGS = {"<NE>": "", "</NE>": "", "<AB>": "", "</AB>": ""}
-     # character type mapping from https://github.com/rkcosmos/deepcut/blob/master/deepcut/utils.py
-     _CHAR_TYPES_DICT = {
-         "กขฃคฆงจชซญฎฏฐฑฒณดตถทธนบปพฟภมยรลวศษสฬอ": "c",
-         "ฅฉผฟฌหฮ": "n",
-         "ะาำิีืึุู": "v",  # า ะ ำ ิ ี ึ ื ั ู ุ
-         "เแโใไ": "w",
-         "่้๊๋": "t",  # วรรณยุกต์ ่ ้ ๊ ๋
-         "์ๆฯ.": "s",  # ์ ๆ ฯ .
-         "0123456789๑๒๓๔๕๖๗๘๙": "d",
-         '"': "q",
-         "‘": "q",
-         "’": "q",
-         "'": "q",
-         " ": "p",
-         "abcdefghijklmnopqrstuvwxyz": "s_e",
-         "ABCDEFGHIJKLMNOPQRSTUVWXYZ": "b_e",
-     }
-     _CHAR_TYPE_FLATTEN = {}
-     for ks, v in _CHAR_TYPES_DICT.items():
-         for k in ks:
-             _CHAR_TYPE_FLATTEN[k] = v
-     _CHAR_TYPES = ["b_e", "c", "d", "n", "o", "p", "q", "s", "s_e", "t", "v", "w"]
-
-     BUILDER_CONFIGS = [
-         Best2009Config(
-             name="best2009",
-             version=datasets.Version("1.0.0"),
-             description=_DESCRIPTION,
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "fname": datasets.Value("string"),
-                     "char": datasets.Sequence(datasets.Value("string")),
-                     "char_type": datasets.Sequence(datasets.features.ClassLabel(names=self._CHAR_TYPES)),
-                     "is_beginning": datasets.Sequence(datasets.features.ClassLabel(names=["neg", "pos"])),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://aiforthai.in.th/",
-             citation=_CITATION,
-             license=_LICENSE,
-         )
-
-     def _split_generators(self, dl_manager):
-         arch_path = dl_manager.download_and_extract(self._DOWNLOAD_URL)
-         data_dir = os.path.join(arch_path, "data")
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"filepath": os.path.join(data_dir, self._TRAIN_FOLDER), "split": "train"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
- gen_kwargs={"filepath": os.path.join(data_dir, self._TEST_FOLDER), "split": "train"},
113
-             ),
-         ]
-
-     def _generate_examples(self, filepath, split):
-         for file_idx, fname in enumerate(sorted(Path(filepath).rglob("*.txt"))):
-             with open(fname, encoding="utf-8") as f:
-                 for line_idx, line in enumerate(f):
-                     chars = []
-                     char_types = []
-                     is_beginnings = []
-                     # replace useless tokens
-                     line = reduce(lambda a, kv: a.replace(*kv), self._USELESS_TAGS.items(), line)
-                     # tokens are pipe separated
-                     splits = line.split("|")
-                     for token in splits:
-                         for i in range(len(token)):
-                             chars.append(token[i])
-                             char_types.append(self._CHAR_TYPE_FLATTEN.get(token[i], "o"))
-                             is_beginning = 1 if i == 0 else 0
-                             is_beginnings.append(is_beginning)
-                     yield f"{file_idx}_{line_idx}", {
-                         "fname": fname.name,
-                         "char": chars,
-                         "char_type": char_types,
-                         "is_beginning": is_beginnings if split == "train" else [0 for i in range(len(chars))],
-                     }
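
With the loading script deleted, the dataset is expected to be served directly from data files in the repo. A minimal sketch of loading it and rebuilding segmented words from the character-level labels, assuming the repo id `best2009` and the feature schema (`fname`, `char`, `char_type`, `is_beginning`) that the deleted script defined remain unchanged after this commit:

from datasets import load_dataset

# Load straight from the Hub's data files; no loading script is needed.
ds = load_dataset("best2009")

example = ds["train"][0]

# is_beginning is a ClassLabel (["neg", "pos"]) stored as 0/1; a value of 1
# marks the first character of a word, so the original segmentation can be
# reconstructed by splitting at each positive label.
words, current = [], []
for ch, begin in zip(example["char"], example["is_beginning"]):
    if begin == 1 and current:
        words.append("".join(current))
        current = []
    current.append(ch)
if current:
    words.append("".join(current))
print(words[:10])

Note that test-split labels are all zeros (the BEST 2010 answers are not public), so this reconstruction is only meaningful on the train split.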