taishi-i committed
Commit 30143df
Parent: 327adb9

add nagisa_stopwords datasets

Files changed (4)
  1. README.md +36 -0
  2. nagisa_stopwords.csv +101 -0
  3. nagisa_stopwords.py +64 -0
  4. test_nagisa_stopwords.py +217 -0
README.md CHANGED
@@ -1,3 +1,39 @@
 ---
 license: mit
+tags:
+- stopwords
+pretty_name: stopwords
+size_categories:
+- n<1K
+language:
+- ja
 ---
+
+# Japanese stopwords for nagisa
+
+
+This is a stopword list of frequently used Japanese words, built according to the tokenization rules of the Japanese text analysis library [nagisa](https://github.com/taishi-i/nagisa).
+
+The list contains the top 100 most frequent words extracted from the [CC-100 dataset](https://data.statmt.org/cc-100/) and [Wikipedia](https://dumps.wikimedia.org/other/cirrussearch/).
+
+To access the list, run the code below.
+
+First, install the Hugging Face datasets library:
+
+```bash
+$ pip install datasets
+```
+
+Then load the dataset:
+
+```python
+from datasets import load_dataset
+
+dataset = load_dataset("taishi-i/nagisa_stopwords")
+
+# the top 100 most frequent words
+words = dataset["nagisa_stopwords"]["words"]
+
+# the part-of-speech tags for those words
+postags = dataset["nagisa_stopwords"]["postags"]
+```
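
Since the list follows nagisa's tokenization, the natural use is filtering nagisa's output. A minimal sketch of that workflow (not part of this commit; it assumes nagisa is installed via `pip install nagisa`, and the example sentence is arbitrary):

```python
import nagisa
from datasets import load_dataset

dataset = load_dataset("taishi-i/nagisa_stopwords")

# Build (word, postag) pairs: the same surface form can carry
# different tags (e.g. で as 助詞 and as 助動詞).
stopwords = set(
    zip(dataset["nagisa_stopwords"]["words"],
        dataset["nagisa_stopwords"]["postags"])
)

# nagisa.tagging returns a result with parallel .words/.postags lists.
tokens = nagisa.tagging("今日はとても良い天気です。")
content_words = [
    w for w, p in zip(tokens.words, tokens.postags)
    if (w, p) not in stopwords
]
print(content_words)
```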
nagisa_stopwords.csv ADDED
@@ -0,0 +1,101 @@
+words,postags
+の,助詞
+、,補助記号
+。,補助記号
+て,助詞
+に,助詞
+を,助詞
+が,助詞
+は,助詞
+と,助詞
+た,助動詞
+し,動詞
+で,助詞
+も,助詞
+ます,助動詞
+　,空白
+で,助動詞
+です,助動詞
+1,名詞
+こと,名詞
+な,助動詞
+0,名詞
+に,助動詞
+いる,動詞
+する,動詞
+い,動詞
+か,助詞
+2,名詞
+「,補助記号
+」,補助記号
+から,助詞
+お,接頭辞
+れ,助動詞
+まし,助動詞
+・,補助記号
+(,補助記号
+),補助記号
+さ,動詞
+ない,助動詞
+ある,動詞
+いう,動詞
+よう,形状詞
+年,名詞
+だ,助動詞
+あり,動詞
+や,助詞
+3,名詞
+その,連体詞
+なっ,動詞
+!,補助記号
+ませ,助動詞
+ん,助動詞
+5,名詞
+この,連体詞
+ない,形容詞
+ご,接頭辞
+9,名詞
+月,名詞
+的,接尾辞
+4,名詞
+ば,助詞
+など,助詞
+人,名詞
+もの,名詞
+8,名詞
+でき,動詞
+方,名詞
+なり,動詞
+ため,名詞
+6,名詞
+なる,動詞
+まで,助詞
+7,名詞
+?,補助記号
+ね,助詞
+者,接尾辞
+それ,代名詞
+さん,接尾辞
+場合,名詞
+たい,助動詞
+私,代名詞
+れる,助動詞
+日,名詞
+ん,助詞
+思い,動詞
+これ,代名詞
+自分,名詞
+ください,動詞
+時,名詞
+できる,動詞
+日,接尾辞
+だけ,助詞
+...,補助記号
+でしょう,助動詞
+日本,名詞
+-,補助記号
+き,動詞
+おり,動詞
+一,名詞
+たら,助動詞
+られ,助動詞
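
Note that the same surface form appears more than once with different part-of-speech tags (で, に, ない, ん, and 日 each occur twice), so the rows are best treated as (word, postag) pairs rather than a flat word list.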
nagisa_stopwords.py ADDED
@@ -0,0 +1,64 @@
+import csv
+
+import datasets
+
+_DESCRIPTION = """\
+Japanese stopwords for nagisa.
+"""
+
+_HOMEPAGE = "https://github.com/taishi-i/nagisa"
+_CITATION = ""
+_LICENSE = "MIT"
+
+
+class NagisaStopwordsDataset(datasets.GeneratorBasedBuilder):
+    """Japanese stopwords for nagisa."""
+
+    VERSION = datasets.Version("0.0.1")
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="nagisa_stopwords",
+            version=VERSION,
+            description=_DESCRIPTION,
+        ),
+    ]
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "words": datasets.Value("string"),
+                "postags": datasets.Value("string"),
+            }
+        )
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        data_url = "nagisa_stopwords.csv"
+        return [
+            datasets.SplitGenerator(
+                name="nagisa_stopwords",
+                gen_kwargs={
+                    "filepath": dl_manager.download_and_extract(data_url),
+                    "split": "words",
+                },
+            )
+        ]
+
+    def _generate_examples(self, filepath, split):
+        """Generates examples."""
+        with open(filepath, "r", encoding="utf-8") as f:
+            reader = csv.reader(f)
+            next(reader)  # Skip the header row
+            for id_, row in enumerate(reader):
+                words, postags = row
+                yield id_, {
+                    "words": words,
+                    "postags": postags,
+                }
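
The loading script can be exercised locally before pushing. A sketch, assuming a pre-3.0 version of datasets (loading scripts were removed in datasets 3.0):

```python
from datasets import load_dataset

# Load from the local script instead of the Hub repository.
dataset = load_dataset("nagisa_stopwords.py")

# The single split is named "nagisa_stopwords"; each example is one CSV row.
print(dataset["nagisa_stopwords"][0])
# {'words': 'の', 'postags': '助詞'}
```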
test_nagisa_stopwords.py ADDED
@@ -0,0 +1,217 @@
+from datasets import load_dataset
+
+
+def test_load_dataset():
+    dataset = load_dataset("taishi-i/nagisa_stopwords")
+    nagisa_stopwords = dataset["nagisa_stopwords"]
+
+    assert "words" in nagisa_stopwords.features
+    assert "postags" in nagisa_stopwords.features
+
+    expected_words = [
+        "の",
+        "、",
+        "。",
+        "て",
+        "に",
+        "を",
+        "が",
+        "は",
+        "と",
+        "た",
+        "し",
+        "で",
+        "も",
+        "ます",
+        "\u3000",
+        "で",
+        "です",
+        "1",
+        "こと",
+        "な",
+        "0",
+        "に",
+        "いる",
+        "する",
+        "い",
+        "か",
+        "2",
+        "「",
+        "」",
+        "から",
+        "お",
+        "れ",
+        "まし",
+        "・",
+        "(",
+        ")",
+        "さ",
+        "ない",
+        "ある",
+        "いう",
+        "よう",
+        "年",
+        "だ",
+        "あり",
+        "や",
+        "3",
+        "その",
+        "なっ",
+        "!",
+        "ませ",
+        "ん",
+        "5",
+        "この",
+        "ない",
+        "ご",
+        "9",
+        "月",
+        "的",
+        "4",
+        "ば",
+        "など",
+        "人",
+        "もの",
+        "8",
+        "でき",
+        "方",
+        "なり",
+        "ため",
+        "6",
+        "なる",
+        "まで",
+        "7",
+        "?",
+        "ね",
+        "者",
+        "それ",
+        "さん",
+        "場合",
+        "たい",
+        "私",
+        "れる",
+        "日",
+        "ん",
+        "思い",
+        "これ",
+        "自分",
+        "ください",
+        "時",
+        "できる",
+        "日",
+        "だけ",
+        "...",
+        "でしょう",
+        "日本",
+        "-",
+        "き",
+        "おり",
+        "一",
+        "たら",
+        "られ",
+    ]
+    assert expected_words == nagisa_stopwords["words"]
+
+    expected_postags = [
+        "助詞",
+        "補助記号",
+        "補助記号",
+        "助詞",
+        "助詞",
+        "助詞",
+        "助詞",
+        "助詞",
+        "助詞",
+        "助動詞",
+        "動詞",
+        "助詞",
+        "助詞",
+        "助動詞",
+        "空白",
+        "助動詞",
+        "助動詞",
+        "名詞",
+        "名詞",
+        "助動詞",
+        "名詞",
+        "助動詞",
+        "動詞",
+        "動詞",
+        "動詞",
+        "助詞",
+        "名詞",
+        "補助記号",
+        "補助記号",
+        "助詞",
+        "接頭辞",
+        "助動詞",
+        "助動詞",
+        "補助記号",
+        "補助記号",
+        "補助記号",
+        "動詞",
+        "助動詞",
+        "動詞",
+        "動詞",
+        "形状詞",
+        "名詞",
+        "助動詞",
+        "動詞",
+        "助詞",
+        "名詞",
+        "連体詞",
+        "動詞",
+        "補助記号",
+        "助動詞",
+        "助動詞",
+        "名詞",
+        "連体詞",
+        "形容詞",
+        "接頭辞",
+        "名詞",
+        "名詞",
+        "接尾辞",
+        "名詞",
+        "助詞",
+        "助詞",
+        "名詞",
+        "名詞",
+        "名詞",
+        "動詞",
+        "名詞",
+        "動詞",
+        "名詞",
+        "名詞",
+        "動詞",
+        "助詞",
+        "名詞",
+        "補助記号",
+        "助詞",
+        "接尾辞",
+        "代名詞",
+        "接尾辞",
+        "名詞",
+        "助動詞",
+        "代名詞",
+        "助動詞",
+        "名詞",
+        "助詞",
+        "動詞",
+        "代名詞",
+        "名詞",
+        "動詞",
+        "名詞",
+        "動詞",
+        "接尾辞",
+        "助詞",
+        "補助記号",
+        "助動詞",
+        "名詞",
+        "補助記号",
+        "動詞",
+        "動詞",
+        "名詞",
+        "助動詞",
+        "助動詞",
+    ]
+    assert expected_postags == nagisa_stopwords["postags"]
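
The assertions above pin the full contents of both columns, so any change to nagisa_stopwords.csv must be mirrored here; the suite runs with `pytest test_nagisa_stopwords.py` (pytest is assumed to be installed).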