system HF staff committed on
Commit
625705b
0 Parent(s):

Update files from the datasets library (from 1.2.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,187 @@
+ ---
+ annotations_creators:
+ - expert-generated
+ language_creators:
+ - found
+ languages:
+ - th
+ licenses:
+ - cc0-1-0
+ multilinguality:
+ - monolingual
+ size_categories:
+ - n<1K
+ source_datasets:
+ - extended|wisesight_sentiment
+ task_categories:
+ - structure-prediction
+ task_ids:
+ - structure-prediction-other-word-tokenization
+ ---
+
+ # Dataset Card for `wisesight1000`
+
+ ## Table of Contents
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+
+ ## Dataset Description
+
+ - **Homepage:** https://github.com/PyThaiNLP/wisesight-sentiment
+ - **Repository:** https://github.com/PyThaiNLP/wisesight-sentiment/blob/master/word-tokenization/
+ - **Paper:**
+ - **Leaderboard:**
+ - **Point of Contact:** https://github.com/PyThaiNLP/
+
+ ### Dataset Summary
+
+ `wisesight1000` contains Thai social media texts randomly drawn from the full `wisesight-sentiment`, tokenized by human annotators.
+ 250 samples were drawn from each of the labels `neg` (negative), `neu` (neutral), `pos` (positive), and `q` (question); some texts were then removed because they look like spam. Because these samples are representative of real-world content, we believe having these annotated samples will allow the community to robustly evaluate tokenization algorithms.
+
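+ A minimal loading sketch (assuming the dataset resolves under the `wisesight1000` identifier; adjust the name or path to match where this loading script lives):
+
+ ```
+ from datasets import load_dataset
+
+ # all 993 examples are exposed as a single "train" split
+ ds = load_dataset("wisesight1000", split="train")
+ print(ds[0]["char"][:10])
+ print(ds[0]["is_beginning"][:10])
+ ```
+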
+ ### Supported Tasks and Leaderboards
+
+ word tokenization
+
+ ### Languages
+
+ Thai
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ ```
+ {'char': ['E', 'u', 'c', 'e', 'r', 'i', 'n', ' ', 'p', 'r', 'o', ' ', 'a', 'c', 'n', 'e', ' ', 'ค', '่', 'ะ', ' ', 'ใ', 'ช', '้', 'แ', 'ล', '้', 'ว', 'ส', 'ิ', 'ว', 'ข', 'ึ', '้', 'น', 'เ', 'พ', 'ิ', '่', 'ม', 'ท', 'ุ', 'ก', 'ว', 'ั', 'น', ' ', 'ม', 'า', 'ด', 'ู', 'ก', 'ั', 'น', 'น', 'ะ', 'ค', 'ะ', ' ', 'ว', '่', 'า', 'จ', 'ั', 'ด', 'ก', 'า', 'ร', 'ป', 'ั', 'ญ', 'ห', 'า', 'ส', 'ิ', 'ว', 'ใ', 'น', '7', 'ว', 'ั', 'น', 'ไ', 'ด', '้', 'ร', 'ึ', 'ม', 'ั', '่', 'ย', 'ย', 'ย', 'ย', 'ย', 'ย', 'ย', 'ย', ' ', 'ล', '่', 'า', 'ส', 'ุ', 'ด', 'ไ', 'ป', 'ล', '้', 'า', 'ง', 'ห', 'น', '้', '…', '\n'], 'char_type': [0, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 5, 8, 8, 8, 8, 5, 1, 9, 10, 5, 11, 1, 9, 11, 1, 9, 1, 1, 10, 1, 1, 10, 9, 1, 11, 1, 10, 9, 1, 1, 10, 1, 1, 4, 1, 5, 1, 10, 1, 10, 1, 4, 1, 1, 10, 1, 10, 5, 1, 9, 10, 1, 4, 1, 1, 10, 1, 1, 4, 1, 3, 10, 1, 10, 1, 11, 1, 2, 1, 4, 1, 11, 1, 9, 1, 10, 1, 4, 9, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 9, 10, 1, 10, 1, 11, 1, 1, 9, 10, 1, 3, 1, 9, 4, 4], 'is_beginning': [1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0]}
+ {'char': ['แ', 'พ', 'ง', 'เ', 'ว', '่', 'อ', 'ร', '์', ' ', 'เ', 'บ', 'ี', 'ย', 'ร', '์', 'ช', '้', 'า', 'ง', 'ต', '้', 'น', 'ท', 'ุ', 'น', 'ข', 'ว', 'ด', 'ล', 'ะ', 'ไ', 'ม', '่', 'ถ', 'ึ', 'ง', ' ', '5', '0', ' ', 'ข', 'า', 'ย', ' ', '1', '2', '0', ' ', '😰', '😰', '😰', '์', '\n'], 'char_type': [11, 1, 1, 11, 1, 9, 1, 1, 7, 5, 11, 1, 10, 1, 1, 7, 1, 9, 10, 1, 1, 9, 1, 1, 10, 1, 1, 1, 1, 1, 10, 11, 1, 9, 1, 10, 1, 5, 2, 2, 5, 1, 10, 1, 5, 2, 2, 2, 5, 4, 4, 4, 7, 4], 'is_beginning': [1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0]}
+ ```
+
+ ### Data Fields
+
+ - `char`: characters
+ - `char_type`: character types as adopted from TLex (Haruechaiyasak and Kongyoung, 2009; see [Citation Information](#citation-information)) by [deepcut](https://github.com/rkcosmos/deepcut)
+ - `is_beginning`: `1` if the character starts a word, else `0`
+
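+ Since `is_beginning` marks the first character of every word, tokens can be rebuilt from the two fields. A small sketch (the helper below is hypothetical, not part of the dataset or its loading script):
+
+ ```
+ def chars_to_words(chars, is_beginning):
+     """Rebuild word tokens from per-character begin flags."""
+     words = []
+     for ch, begin in zip(chars, is_beginning):
+         if begin == 1 or not words:
+             words.append(ch)   # a new word starts here
+         else:
+             words[-1] += ch    # extend the current word
+     return words
+
+ # e.g. the second instance above rebuilds to ["แพง", "เว่อร์", ...]
+ ```
+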
+ ### Data Splits
+
+ No explicit split is given; all 993 examples are provided as a single `train` split.
+
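+ If a held-out set is needed, one can be carved out with the library's `train_test_split`; a sketch, continuing the loading example above (the ratio and seed are arbitrary choices):
+
+ ```
+ from datasets import load_dataset
+
+ ds = load_dataset("wisesight1000", split="train")
+ # hold out 10% of the 993 examples for evaluation
+ splits = ds.train_test_split(test_size=0.1, seed=42)
+ train_ds, test_ds = splits["train"], splits["test"]
+ ```
+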
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ The dataset was created from `wisesight-sentiment` to be a word tokenization benchmark that is closer to texts in the wild, since other Thai word tokenization datasets such as [BEST](https://aiforthai.in.th/corpus.php) consist mostly of texts from news articles, which lack real-world features like misspellings.
+
+ ### Source Data
+
+ #### Initial Data Collection and Normalization
+
+ The data are sampled from `wisesight-sentiment`, which has the following data collection and normalization:
+ - Style: informal and conversational, with some news headlines and advertisements.
+ - Time period: around 2016 to early 2019, with a small amount from other periods.
+ - Domains: mixed; the majority are consumer products and services (restaurants, cosmetics, drinks, cars, hotels), with some current affairs.
+ - Privacy:
+   - Only messages made available to the public on the internet (websites, blogs, social network sites) are included.
+   - For Facebook, this means public comments (visible to everyone) made on public pages.
+   - Private/protected messages and messages in groups, chats, and inboxes are not included.
+   - Usernames and non-public-figure names are removed.
+   - Phone numbers are masked (e.g. 088-888-8888, 09-9999-9999, 0-2222-2222).
+   - If you find any personal data remaining in the set, please tell us so we can remove it.
+ - Alterations and modifications:
+   - Keep in mind that this corpus is not statistically representative of any language register.
+   - A large number of messages are not in their original form; personal data have been removed or masked.
+   - Duplicated, leading, and trailing whitespaces are removed. Other punctuation, symbols, and emojis are kept intact.
+   - (Mis)spellings are kept intact.
+   - Messages longer than 2,000 characters are removed.
+   - Long non-Thai messages are removed, as are exact-duplicate messages.
+
+ #### Who are the source language producers?
+
+ Social media users in Thailand.
+
+ ### Annotations
+
+ #### Annotation process
+
+ [More Information Needed]
+
+ #### Who are the annotators?
+
+ The annotation was done by several people, including Nitchakarn Chantarapratin, [Pattarawat Chormai](https://github.com/heytitle), [Ponrawee Prasertsom](https://github.com/ponrawee), [Jitkapat Sawatphol](https://github.com/jitkapat), [Nozomi Yamada](https://github.com/nozomiyamada), and [Attapol Rutherford](https://attapol.github.io/).
+
+ ### Personal and Sensitive Information
+
+ - The authors tried to exclude any known personally identifiable information from this data set.
+ - Usernames and non-public-figure names are removed.
+ - Phone numbers are masked (e.g. 088-888-8888, 09-9999-9999, 0-2222-2222).
+ - If you find any personal data remaining in the set, please tell us so we can remove it.
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ - A word tokenization dataset built from texts in the wild.
+
+ ### Discussion of Biases
+
+ - No word tokenization guideline was given by the authors.
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ Thanks to the [PyThaiNLP](https://github.com/PyThaiNLP/pythainlp) community, [Kitsuchart Pasupa](http://www.it.kmitl.ac.th/~kitsuchart/) (Faculty of Information Technology, King Mongkut's Institute of Technology Ladkrabang), and [Ekapol Chuangsuwanich](https://www.cp.eng.chula.ac.th/en/about/faculty/ekapolc/) (Faculty of Engineering, Chulalongkorn University) for advice. The original Kaggle competition, which used the first version of this corpus, can be found at https://www.kaggle.com/c/wisesight-sentiment/
+
+ ### Licensing Information
+
+ CC0
+
+ ### Citation Information
+
+ Dataset:
+ ```
+ @software{bact_2019_3457447,
+   author       = {Suriyawongkul, Arthit and
+                   Chuangsuwanich, Ekapol and
+                   Chormai, Pattarawat and
+                   Polpanumas, Charin},
+   title        = {PyThaiNLP/wisesight-sentiment: First release},
+   month        = sep,
+   year         = 2019,
+   publisher    = {Zenodo},
+   version      = {v1.0},
+   doi          = {10.5281/zenodo.3457447},
+   url          = {https://doi.org/10.5281/zenodo.3457447}
+ }
+ ```
+
+ Character type features:
+ ```
+ @inproceedings{haruechaiyasak2009tlex,
+   title={TLex: Thai lexeme analyser based on the conditional random fields},
+   author={Haruechaiyasak, Choochart and Kongyoung, Sarawoot},
+   booktitle={Proceedings of the 8th International Symposium on Natural Language Processing},
+   year={2009}
+ }
+ ```
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"wisesight1000": {"description": "\n", "citation": "@software{bact_2019_3457447,\n author = {Suriyawongkul, Arthit and\n Chuangsuwanich, Ekapol and\n Chormai, Pattarawat and\n Polpanumas, Charin},\n title = {PyThaiNLP/wisesight-sentiment: First release},\n month = sep,\n year = 2019,\n publisher = {Zenodo},\n version = {v1.0},\n doi = {10.5281/zenodo.3457447},\n url = {https://doi.org/10.5281/zenodo.3457447}\n}\n", "homepage": "https://github.com/PyThaiNLP/wisesight-sentiment", "license": "CC-0 3.0", "features": {"char": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "char_type": {"feature": {"num_classes": 12, "names": ["b_e", "c", "d", "n", "o", "p", "q", "s", "s_e", "t", "v", "w"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "is_beginning": {"feature": {"num_classes": 2, "names": ["neg", "pos"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "wisesight1000", "config_name": "wisesight1000", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1735438, "num_examples": 993, "dataset_name": "wisesight1000"}}, "download_checksums": {"https://raw.githubusercontent.com/PyThaiNLP/wisesight-sentiment/master/word-tokenization/wisesight-1000-samples-tokenised.label": {"num_bytes": 222691, "checksum": "0d3f8f9958bbee6f9fcc9637fc41ca509ff0d42f1f204e7baaf633c1d2390f90"}}, "download_size": 222691, "post_processing_size": null, "dataset_size": 1735438, "size_in_bytes": 1958129}}
dummy/wisesight1000/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09ebc8e57afca74290adc1f826fbc1ea0d0fabd3191e9edd264a6e9a2ac98511
+ size 1814
wisesight1000.py ADDED
@@ -0,0 +1,119 @@
+ from __future__ import absolute_import, division, print_function
+
+ import datasets
+
+
+ _CITATION = """\
+ @software{bact_2019_3457447,
+   author       = {Suriyawongkul, Arthit and
+                   Chuangsuwanich, Ekapol and
+                   Chormai, Pattarawat and
+                   Polpanumas, Charin},
+   title        = {PyThaiNLP/wisesight-sentiment: First release},
+   month        = sep,
+   year         = 2019,
+   publisher    = {Zenodo},
+   version      = {v1.0},
+   doi          = {10.5281/zenodo.3457447},
+   url          = {https://doi.org/10.5281/zenodo.3457447}
+ }
+ """
+
+ _LICENSE = "CC0"
+
+ _DESCRIPTION = """\
+ `wisesight1000` contains Thai social media texts randomly drawn from the full `wisesight-sentiment`, tokenized by human annotators.
+ 250 samples were drawn from each of the labels `neg` (negative), `neu` (neutral), `pos` (positive), and `q` (question); some texts were
+ then removed because they look like spam. Because these samples are representative of real-world content, we believe having these
+ annotated samples will allow the community to robustly evaluate tokenization algorithms.
+ """
+
+
+ class Wisesight1000Config(datasets.BuilderConfig):
+     def __init__(self, **kwargs):
+         """BuilderConfig for `wisesight1000`.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(Wisesight1000Config, self).__init__(**kwargs)
+
+
+ class Wisesight1000(datasets.GeneratorBasedBuilder):
+
+     _DOWNLOAD_URL = "https://raw.githubusercontent.com/PyThaiNLP/wisesight-sentiment/master/word-tokenization/wisesight-1000-samples-tokenised.label"
+     # character type mapping from https://github.com/rkcosmos/deepcut/blob/master/deepcut/utils.py
+     _CHAR_TYPES_DICT = {
+         "กขฃคฆงจชซญฎฏฐฑฒณดตถทธนบปพฟภมยรลวศษสฬอ": "c",
+         "ฅฉผฟฌหฮ": "n",
+         "ะาำิีืึุู": "v",  # า ะ ำ ิ ี ึ ื ั ู ุ
+         "เแโใไ": "w",
+         "่้๊๋": "t",  # tone marks ่ ้ ๊ ๋
+         "์ๆฯ.": "s",  # ์ ๆ ฯ .
+         "0123456789๑๒๓๔๕๖๗๘๙": "d",
+         '"': "q",
+         "‘": "q",
+         "’": "q",
+         "'": "q",
+         " ": "p",
+         "abcdefghijklmnopqrstuvwxyz": "s_e",
+         "ABCDEFGHIJKLMNOPQRSTUVWXYZ": "b_e",
+     }
+     _CHAR_TYPE_FLATTEN = {}
+     for ks, v in _CHAR_TYPES_DICT.items():
+         for k in ks:
+             _CHAR_TYPE_FLATTEN[k] = v
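+     # e.g. _CHAR_TYPE_FLATTEN["ก"] == "c" and _CHAR_TYPE_FLATTEN["5"] == "d";
+     # characters absent from the mapping are labeled "o" ("other") in _generate_examples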
+     _CHAR_TYPES = ["b_e", "c", "d", "n", "o", "p", "q", "s", "s_e", "t", "v", "w"]
+
+     BUILDER_CONFIGS = [
+         Wisesight1000Config(
+             name="wisesight1000",
+             version=datasets.Version("1.0.0"),
+             description="993 word-annotated social media messages sampled from `wisesight-sentiment`",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "char": datasets.Sequence(datasets.Value("string")),
+                     "char_type": datasets.Sequence(datasets.features.ClassLabel(names=self._CHAR_TYPES)),
+                     "is_beginning": datasets.Sequence(datasets.features.ClassLabel(names=["neg", "pos"])),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/PyThaiNLP/wisesight-sentiment",
+             citation=_CITATION,
+             license=_LICENSE,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_path = dl_manager.download_and_extract(self._DOWNLOAD_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": data_path},
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         with open(filepath, encoding="utf-8") as f:
+             for _id, line in enumerate(f):
+                 chars = []
+                 char_types = []
+                 is_beginnings = []
+                 # tokens are pipe separated
+                 splits = line.split("|")
+                 for token in splits:
+                     for i in range(len(token)):
+                         chars.append(token[i])
+                         char_types.append(self._CHAR_TYPE_FLATTEN.get(token[i], "o"))
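+                         # characters outside _CHAR_TYPE_FLATTEN fall back to "o" ("other");
+                         # the first character of each pipe-separated token starts a word (1), the rest get 0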
+                         is_beginning = 1 if i == 0 else 0
+                         is_beginnings.append(is_beginning)
+                 yield _id, {
+                     "char": chars,
+                     "char_type": char_types,
+                     "is_beginning": is_beginnings,
+                 }