parquet-converter committed on
Commit 4f8f8ca
1 Parent(s): e25239f

Update parquet files

.gitattributes DELETED
@@ -1,27 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
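The deleted rules had routed matching files through Git LFS. As a purely illustrative sketch of how such glob patterns select files, here is a check with Python's `fnmatch`, whose semantics only approximate gitattributes matching:

```python
from fnmatch import fnmatch

# A few of the deleted patterns; "saved_model/**/*" is omitted because
# fnmatch does not implement gitattributes' "**" semantics.
patterns = ["*.parquet", "*.tar.*", "*tfevents*"]

for name in ["best2009-train.parquet", "model.tar.gz", "events.out.tfevents.1", "README.md"]:
    routed = any(fnmatch(name, p) for p in patterns)
    print(f"{name}: {'LFS' if routed else 'plain git'}")
```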
README.md DELETED
@@ -1,229 +0,0 @@
- ---
- annotations_creators:
- - expert-generated
- language_creators:
- - found
- language:
- - th
- license:
- - cc-by-nc-sa-3.0
- multilinguality:
- - monolingual
- size_categories:
- - 100K<n<1M
- source_datasets:
- - original
- task_categories:
- - token-classification
- task_ids: []
- paperswithcode_id: null
- pretty_name: best2009
- tags:
- - word-tokenization
- dataset_info:
-   features:
-   - name: fname
-     dtype: string
-   - name: char
-     sequence: string
-   - name: char_type
-     sequence:
-       class_label:
-         names:
-           0: b_e
-           1: c
-           2: d
-           3: n
-           4: o
-           5: p
-           6: q
-           7: s
-           8: s_e
-           9: t
-           10: v
-           11: w
-   - name: is_beginning
-     sequence:
-       class_label:
-         names:
-           0: neg
-           1: pos
-   config_name: best2009
-   splits:
-   - name: train
-     num_bytes: 483129998
-     num_examples: 148995
-   - name: test
-     num_bytes: 10498726
-     num_examples: 2252
-   download_size: 13891260
-   dataset_size: 493628724
- ---
-
- # Dataset Card for `best2009`
-
- ## Table of Contents
- - [Dataset Description](#dataset-description)
-   - [Dataset Summary](#dataset-summary)
-   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
-   - [Languages](#languages)
- - [Dataset Structure](#dataset-structure)
-   - [Data Instances](#data-instances)
-   - [Data Fields](#data-fields)
-   - [Data Splits](#data-splits)
- - [Dataset Creation](#dataset-creation)
-   - [Curation Rationale](#curation-rationale)
-   - [Source Data](#source-data)
-   - [Annotations](#annotations)
-   - [Personal and Sensitive Information](#personal-and-sensitive-information)
- - [Considerations for Using the Data](#considerations-for-using-the-data)
-   - [Social Impact of Dataset](#social-impact-of-dataset)
-   - [Discussion of Biases](#discussion-of-biases)
-   - [Other Known Limitations](#other-known-limitations)
- - [Additional Information](#additional-information)
-   - [Dataset Curators](#dataset-curators)
-   - [Licensing Information](#licensing-information)
-   - [Citation Information](#citation-information)
-   - [Contributions](#contributions)
-
- ## Dataset Description
-
- - **Homepage:** https://aiforthai.in.th/
- - **Repository:** https://aiforthai.in.th/corpus.php
- - **Paper:**
- - **Leaderboard:**
- - **Point of Contact:** https://aiforthai.in.th/
-
- ### Dataset Summary
-
- `best2009` is a Thai word-tokenization dataset from encyclopedia, novels, news and articles by [NECTEC](https://www.nectec.or.th/) (148,995/2,252 lines of train/test). It was created for [BEST 2010: Word Tokenization Competition](https://thailang.nectec.or.th/archive/indexa290.html?q=node/10). The test set answers are not provided publicly.
-
- ### Supported Tasks and Leaderboards
-
- word tokenization
-
- ### Languages
-
- Thai
-
- ## Dataset Structure
-
- ### Data Instances
-
- ```
- {'char': ['?', 'ภ', 'ู', 'ม', 'ิ', 'ป', 'ั', 'ญ', 'ญ', 'า', 'ช', 'า', 'ว', 'บ', '้', 'า', 'น', '\n'], 'char_type': [4, 1, 10, 1, 10, 1, 4, 1, 1, 10, 1, 10, 1, 1, 9, 10, 1, 4], 'fname': 'encyclopedia_00031.txt', 'is_beginning': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1]}
- {'char': ['ภ', 'ู', 'ม', 'ิ', 'ป', 'ั', 'ญ', 'ญ', 'า', 'ช', 'า', 'ว', 'บ', '้', 'า', 'น', ' ', 'ห', 'ม', 'า', 'ย', 'ถ', 'ึ', 'ง', ' ', 'ค', 'ว', 'า', 'ม', 'ร', 'ู', '้', 'ข', 'อ', 'ง', 'ช', 'า', 'ว', 'บ', '้', 'า', 'น', ' ', 'ซ', 'ึ', '่', 'ง', 'เ', 'ร', 'ี', 'ย', 'น', 'ร', 'ู', '้', 'ม', 'า', 'จ', 'า', 'ก', 'พ', '่', 'อ', 'แ', 'ม', '่', ' ', 'ป', 'ู', '่', 'ย', '่', 'า', 'ต', 'า', 'ย', 'า', 'ย', ' ', 'ญ', 'า', 'ต', 'ิ', 'พ', 'ี', '่', 'น', '้', 'อ', 'ง', ' ', 'ห', 'ร', 'ื', 'อ', 'ผ', 'ู', '้', 'ม', 'ี', 'ค', 'ว', 'า', 'ม', 'ร', 'ู', '้', 'ใ', 'น', 'ห', 'ม', 'ู', '่', 'บ', '้', 'า', 'น', 'ใ', 'น', 'ท', '้', 'อ', 'ง', 'ถ', 'ิ', '่', 'น', 'ต', '่', 'า', 'ง', 'ๆ', '\n'], 'char_type': [1, 10, 1, 10, 1, 4, 1, 1, 10, 1, 10, 1, 1, 9, 10, 1, 5, 3, 1, 10, 1, 1, 10, 1, 5, 1, 1, 10, 1, 1, 10, 9, 1, 1, 1, 1, 10, 1, 1, 9, 10, 1, 5, 1, 10, 9, 1, 11, 1, 10, 1, 1, 1, 10, 9, 1, 10, 1, 10, 1, 1, 9, 1, 11, 1, 9, 5, 1, 10, 9, 1, 9, 10, 1, 10, 1, 10, 1, 5, 1, 10, 1, 10, 1, 10, 9, 1, 9, 1, 1, 5, 3, 1, 10, 1, 3, 10, 9, 1, 10, 1, 1, 10, 1, 1, 10, 9, 11, 1, 3, 1, 10, 9, 1, 9, 10, 1, 11, 1, 1, 9, 1, 1, 1, 10, 9, 1, 1, 9, 10, 1, 7, 4], 'fname': 'encyclopedia_00031.txt', 'is_beginning': [1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]}
- ```
-
- ### Data Fields
-
- - `fname`: file name; also indicates whether the line comes from the articles, news, encyclopedia, or novels subset
- - `char`: characters
- - `char_type`: character types as adopted by [deepcut](https://github.com/rkcosmos/deepcut) (see the character type citation below)
- - `is_beginning`: whether the character begins a word
-
- ### Data Splits
-
- |                         | train      | test    |
- |-------------------------|------------|---------|
- | # lines                 | 148,995    | 2,252   |
- | avg words per line      | 39.05      | NA      |
- | total words             | 5,818,521  | NA      |
- | avg characters per line | 140.39     | 202.79  |
- | total characters        | 20,918,132 | 456,684 |
- | # lines articles        | 16,990     | NA      |
- | # lines encyclopedia    | 50,631     | NA      |
- | # lines novels          | 50,140     | NA      |
- | # lines news            | 31,234     | NA      |
-
- ## Dataset Creation
-
- ### Curation Rationale
-
- The dataset was created for [BEST 2010: Word Tokenization Competition](https://thailang.nectec.or.th/archive/indexa290.html?q=node/10) by [NECTEC](https://www.nectec.or.th/).
-
- ### Source Data
-
- #### Initial Data Collection and Normalization
-
- [More Information Needed]
-
- #### Who are the source language producers?
-
- Respective authors of the articles, news, encyclopedia, and novels
-
- ### Annotations
-
- #### Annotation process
-
- Detailed annotation guidelines can be found in `BEST_Guideline_Release1.pdf` as part of the uncompressed files. The word tokenization standard used was [InterBEST2009](http://hltshare.fbk.eu/IWSLT2015/InterBEST2009Guidelines-2.pdf).
-
- #### Who are the annotators?
-
- [More Information Needed]
-
- ### Personal and Sensitive Information
-
- All data are curated from public sources. No personal or sensitive information is expected to be included.
-
- ## Considerations for Using the Data
-
- ### Social Impact of Dataset
-
- - word tokenization dataset from articles, news, encyclopedia and novels
-
- ### Discussion of Biases
-
- - texts are relatively formal ones from articles, news, encyclopedia and novels.
- - the word tokenization standard used was [InterBEST2009](http://hltshare.fbk.eu/IWSLT2015/InterBEST2009Guidelines-2.pdf).
-
- ### Other Known Limitations
-
- - some tags unrelated to word tokenization (`<NE>` and `<AB>`) are cleaned out.
- - no word boundaries are provided for the test set.
-
- ## Additional Information
-
- ### Dataset Curators
-
- [NECTEC](https://www.nectec.or.th/)
-
- ### Licensing Information
-
- CC-BY-NC-SA 3.0
-
- ### Citation Information
-
- Dataset:
- ```
- @inproceedings{kosawat2009best,
-   title={BEST 2009: Thai word segmentation software contest},
-   author={Kosawat, Krit and Boriboon, Monthika and Chootrakool, Patcharika and Chotimongkol, Ananlada and Klaithin, Supon and Kongyoung, Sarawoot and Kriengket, Kanyanut and Phaholphinyo, Sitthaa and Purodakananda, Sumonmas and Thanakulwarapas, Tipraporn and others},
-   booktitle={2009 Eighth International Symposium on Natural Language Processing},
-   pages={83--88},
-   year={2009},
-   organization={IEEE}
- }
- @inproceedings{boriboon2009best,
-   title={Best corpus development and analysis},
-   author={Boriboon, Monthika and Kriengket, Kanyanut and Chootrakool, Patcharika and Phaholphinyo, Sitthaa and Purodakananda, Sumonmas and Thanakulwarapas, Tipraporn and Kosawat, Krit},
-   booktitle={2009 International Conference on Asian Language Processing},
-   pages={322--327},
-   year={2009},
-   organization={IEEE}
- }
- ```
-
- Character type features:
- ```
- @inproceedings{haruechaiyasak2009tlex,
-   title={TLex: Thai lexeme analyser based on the conditional random fields},
-   author={Haruechaiyasak, Choochart and Kongyoung, Sarawoot},
-   booktitle={Proceedings of 8th International Symposium on Natural Language Processing},
-   year={2009}
- }
- ```
-
- ### Contributions
-
- Thanks to [@cstorm125](https://github.com/cstorm125) for adding this dataset.
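With the README removed, the record layout it documents is easiest to sanity-check by loading the converted data. A minimal sketch with the `datasets` library, assuming the hub repo id `best2009` (taken from the card's config name):

```python
from datasets import load_dataset

# Load the dataset; after this commit the hub serves it from the
# parquet files added below. The repo id is assumed from the card.
dataset = load_dataset("best2009")
example = dataset["train"][0]

# Reassemble the words of a line from the character-level labels,
# mirroring the `char`/`is_beginning` fields described in the card
# (`is_beginning` is ClassLabel-encoded: 1 = "pos" = word start).
words, current = [], ""
for ch, begin in zip(example["char"], example["is_beginning"]):
    if begin == 1 and current:
        words.append(current)
        current = ""
    current += ch
if current:
    words.append(current)
print(example["fname"], words[:5])
```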
best2009.py DELETED
@@ -1,138 +0,0 @@
- import os
- from functools import reduce
- from pathlib import Path
-
- import datasets
-
-
- _CITATION = """\
- @inproceedings{kosawat2009best,
-   title={BEST 2009: Thai word segmentation software contest},
-   author={Kosawat, Krit and Boriboon, Monthika and Chootrakool, Patcharika and Chotimongkol, Ananlada and Klaithin, Supon and Kongyoung, Sarawoot and Kriengket, Kanyanut and Phaholphinyo, Sitthaa and Purodakananda, Sumonmas and Thanakulwarapas, Tipraporn and others},
-   booktitle={2009 Eighth International Symposium on Natural Language Processing},
-   pages={83--88},
-   year={2009},
-   organization={IEEE}
- }
- @inproceedings{boriboon2009best,
-   title={Best corpus development and analysis},
-   author={Boriboon, Monthika and Kriengket, Kanyanut and Chootrakool, Patcharika and Phaholphinyo, Sitthaa and Purodakananda, Sumonmas and Thanakulwarapas, Tipraporn and Kosawat, Krit},
-   booktitle={2009 International Conference on Asian Language Processing},
-   pages={322--327},
-   year={2009},
-   organization={IEEE}
- }
- """
-
- _LICENSE = "CC-BY-NC-SA 3.0"
-
- _DESCRIPTION = """\
- `best2009` is a Thai word-tokenization dataset from encyclopedia, novels, news and articles by
- [NECTEC](https://www.nectec.or.th/) (148,995/2,252 lines of train/test). It was created for
- [BEST 2010: Word Tokenization Competition](https://thailang.nectec.or.th/archive/indexa290.html?q=node/10).
- The test set answers are not provided publicly.
- """
-
-
- class Best2009Config(datasets.BuilderConfig):
-     def __init__(self, **kwargs):
-         """BuilderConfig
-
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(Best2009Config, self).__init__(**kwargs)
-
-
- class Best2009(datasets.GeneratorBasedBuilder):
-
-     _DOWNLOAD_URL = "https://archive.org/download/best_dataset/data.zip"
-     _TRAIN_FOLDER = "train"
-     _TEST_FOLDER = "test"
-
-     _USELESS_TAGS = {"<NE>": "", "</NE>": "", "<AB>": "", "</AB>": ""}
-     # character type mapping from https://github.com/rkcosmos/deepcut/blob/master/deepcut/utils.py
-     _CHAR_TYPES_DICT = {
-         "กขฃคฆงจชซญฎฏฐฑฒณดตถทธนบปพฟภมยรลวศษสฬอ": "c",
-         "ฅฉผฟฌหฮ": "n",
-         "ะาำิีืึุู": "v",  # า ะ ำ ิ ี ึ ื ั ู ุ
-         "เแโใไ": "w",
-         "่้๊๋": "t",  # tone marks ่ ้ ๊ ๋
-         "์ๆฯ.": "s",  # ์ ๆ ฯ .
-         "0123456789๑๒๓๔๕๖๗๘๙": "d",
-         '"': "q",
-         "‘": "q",
-         "’": "q",
-         "'": "q",
-         " ": "p",
-         "abcdefghijklmnopqrstuvwxyz": "s_e",
-         "ABCDEFGHIJKLMNOPQRSTUVWXYZ": "b_e",
-     }
-     _CHAR_TYPE_FLATTEN = {}
-     for ks, v in _CHAR_TYPES_DICT.items():
-         for k in ks:
-             _CHAR_TYPE_FLATTEN[k] = v
-     _CHAR_TYPES = ["b_e", "c", "d", "n", "o", "p", "q", "s", "s_e", "t", "v", "w"]
-
-     BUILDER_CONFIGS = [
-         Best2009Config(
-             name="best2009",
-             version=datasets.Version("1.0.0"),
-             description=_DESCRIPTION,
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "fname": datasets.Value("string"),
-                     "char": datasets.Sequence(datasets.Value("string")),
-                     "char_type": datasets.Sequence(datasets.features.ClassLabel(names=self._CHAR_TYPES)),
-                     "is_beginning": datasets.Sequence(datasets.features.ClassLabel(names=["neg", "pos"])),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://aiforthai.in.th/",
-             citation=_CITATION,
-             license=_LICENSE,
-         )
-
-     def _split_generators(self, dl_manager):
-         arch_path = dl_manager.download_and_extract(self._DOWNLOAD_URL)
-         data_dir = os.path.join(arch_path, "data")
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"filepath": os.path.join(data_dir, self._TRAIN_FOLDER), "split": "train"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"filepath": os.path.join(data_dir, self._TEST_FOLDER), "split": "test"},
-             ),
-         ]
-
-     def _generate_examples(self, filepath, split):
-         for file_idx, fname in enumerate(sorted(Path(filepath).rglob("*.txt"))):
-             with open(fname, encoding="utf-8") as f:
-                 for line_idx, line in enumerate(f):
-                     chars = []
-                     char_types = []
-                     is_beginnings = []
-                     # replace useless tokens
-                     line = reduce(lambda a, kv: a.replace(*kv), self._USELESS_TAGS.items(), line)
-                     # tokens are pipe separated
-                     splits = line.split("|")
-                     for token in splits:
-                         for i in range(len(token)):
-                             chars.append(token[i])
-                             char_types.append(self._CHAR_TYPE_FLATTEN.get(token[i], "o"))
-                             is_beginning = 1 if i == 0 else 0
-                             is_beginnings.append(is_beginning)
-                     yield f"{file_idx}_{line_idx}", {
-                         "fname": fname.name,
-                         "char": chars,
-                         "char_type": char_types,
-                         "is_beginning": is_beginnings if split == "train" else [0 for i in range(len(chars))],
-                     }
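The heart of the removed script is the character loop in `_generate_examples`. A minimal sketch of the same flattening and pipe-splitting logic on an invented sample line; the condensed `CHAR_TYPES_DICT` below keeps only three of the original classes:

```python
from functools import reduce

# Condensed stand-in for the script's _CHAR_TYPES_DICT; "o" stays the
# fallback class for characters that appear in no key.
CHAR_TYPES_DICT = {
    "กขค": "c",         # a few consonants from the full mapping
    " ": "p",           # space
    "0123456789": "d",  # digits
}
# Same flattening trick as the script: one dict entry per character.
CHAR_TYPE_FLATTEN = {k: v for ks, v in CHAR_TYPES_DICT.items() for k in ks}

USELESS_TAGS = {"<NE>": "", "</NE>": "", "<AB>": "", "</AB>": ""}

# A hypothetical annotated line: words are pipe-separated, with one tag.
line = "<NE>กขค</NE>|ก ข|123"
line = reduce(lambda a, kv: a.replace(*kv), USELESS_TAGS.items(), line)

chars, char_types, is_beginnings = [], [], []
for token in line.split("|"):
    for i, ch in enumerate(token):
        chars.append(ch)
        char_types.append(CHAR_TYPE_FLATTEN.get(ch, "o"))
        is_beginnings.append(1 if i == 0 else 0)

print(char_types)     # ['c', 'c', 'c', 'c', 'p', 'c', 'd', 'd', 'd']
print(is_beginnings)  # [1, 0, 0, 1, 0, 0, 1, 0, 0]
```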
best2009/best2009-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db43200bd82d924047a4e718b05df9c23bab3df68ff935eaafaf3d6485c2b294
+ size 493104
best2009/best2009-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97253bfd6f1251ef17ade968d320170197c2fc70b071f57317268b20bc1682ec
+ size 27591681
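The two added files are Git LFS pointers, not the parquet bytes themselves; the `oid sha256:` and `size` lines identify the real object. One way to check a downloaded copy against its pointer, sketched here with an assumed local path:

```python
import hashlib

# Assumed local path to the downloaded train parquet file.
path = "best2009/best2009-train.parquet"

# Hash in 1 MiB chunks so the file never has to fit in memory.
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

# Should print the oid from the pointer above:
# 97253bfd6f1251ef17ade968d320170197c2fc70b071f57317268b20bc1682ec
print(digest.hexdigest())
```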
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"best2009": {"description": "`best2009` is a Thai word-tokenization dataset from encyclopedia, novels, news and articles by\n[NECTEC](https://www.nectec.or.th/) (148,995/2,252 lines of train/test). It was created for\n[BEST 2010: Word Tokenization Competition](https://thailang.nectec.or.th/archive/indexa290.html?q=node/10).\nThe test set answers are not provided publicly.\n", "citation": "@inproceedings{kosawat2009best,\n title={BEST 2009: Thai word segmentation software contest},\n author={Kosawat, Krit and Boriboon, Monthika and Chootrakool, Patcharika and Chotimongkol, Ananlada and Klaithin, Supon and Kongyoung, Sarawoot and Kriengket, Kanyanut and Phaholphinyo, Sitthaa and Purodakananda, Sumonmas and Thanakulwarapas, Tipraporn and others},\n booktitle={2009 Eighth International Symposium on Natural Language Processing},\n pages={83--88},\n year={2009},\n organization={IEEE}\n}\n@inproceedings{boriboon2009best,\n title={Best corpus development and analysis},\n author={Boriboon, Monthika and Kriengket, Kanyanut and Chootrakool, Patcharika and Phaholphinyo, Sitthaa and Purodakananda, Sumonmas and Thanakulwarapas, Tipraporn and Kosawat, Krit},\n booktitle={2009 International Conference on Asian Language Processing},\n pages={322--327},\n year={2009},\n organization={IEEE}\n}\n", "homepage": "https://aiforthai.in.th/", "license": "CC-BY-NC-SA 3.0", "features": {"fname": {"dtype": "string", "id": null, "_type": "Value"}, "char": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "char_type": {"feature": {"num_classes": 12, "names": ["b_e", "c", "d", "n", "o", "p", "q", "s", "s_e", "t", "v", "w"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "is_beginning": {"feature": {"num_classes": 2, "names": ["neg", "pos"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "best2009", "config_name": "best2009", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 483129998, "num_examples": 148995, "dataset_name": "best2009"}, "test": {"name": "test", "num_bytes": 10498726, "num_examples": 2252, "dataset_name": "best2009"}}, "download_checksums": {"https://archive.org/download/best_dataset/data.zip": {"num_bytes": 13891260, "checksum": "009386ea03aab2abd194bcb3b86c01b81038f460296c447ce2c0e561d3eca64f"}}, "download_size": 13891260, "post_processing_size": null, "dataset_size": 493628724, "size_in_bytes": 507519984}}