acul3 commited on
Commit
721240e
1 Parent(s): 820f9a8

init files

Browse files
.gitattributes CHANGED
@@ -1,51 +1 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ftz filter=lfs diff=lfs merge=lfs -text
6
- *.gz filter=lfs diff=lfs merge=lfs -text
7
- *.h5 filter=lfs diff=lfs merge=lfs -text
8
- *.joblib filter=lfs diff=lfs merge=lfs -text
9
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
- *.lz4 filter=lfs diff=lfs merge=lfs -text
11
- *.model filter=lfs diff=lfs merge=lfs -text
12
- *.msgpack filter=lfs diff=lfs merge=lfs -text
13
- *.npy filter=lfs diff=lfs merge=lfs -text
14
- *.npz filter=lfs diff=lfs merge=lfs -text
15
- *.onnx filter=lfs diff=lfs merge=lfs -text
16
- *.ot filter=lfs diff=lfs merge=lfs -text
17
- *.parquet filter=lfs diff=lfs merge=lfs -text
18
- *.pb filter=lfs diff=lfs merge=lfs -text
19
- *.pickle filter=lfs diff=lfs merge=lfs -text
20
- *.pkl filter=lfs diff=lfs merge=lfs -text
21
- *.pt filter=lfs diff=lfs merge=lfs -text
22
- *.pth filter=lfs diff=lfs merge=lfs -text
23
- *.rar filter=lfs diff=lfs merge=lfs -text
24
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
25
- *.tar.* filter=lfs diff=lfs merge=lfs -text
26
- *.tflite filter=lfs diff=lfs merge=lfs -text
27
- *.tgz filter=lfs diff=lfs merge=lfs -text
28
- *.wasm filter=lfs diff=lfs merge=lfs -text
29
- *.xz filter=lfs diff=lfs merge=lfs -text
30
- *.zip filter=lfs diff=lfs merge=lfs -text
31
  *.zst filter=lfs diff=lfs merge=lfs -text
32
- *tfevents* filter=lfs diff=lfs merge=lfs -text
33
- # Audio files - uncompressed
34
- *.pcm filter=lfs diff=lfs merge=lfs -text
35
- *.sam filter=lfs diff=lfs merge=lfs -text
36
- *.raw filter=lfs diff=lfs merge=lfs -text
37
- # Audio files - compressed
38
- *.aac filter=lfs diff=lfs merge=lfs -text
39
- *.flac filter=lfs diff=lfs merge=lfs -text
40
- *.mp3 filter=lfs diff=lfs merge=lfs -text
41
- *.ogg filter=lfs diff=lfs merge=lfs -text
42
- *.wav filter=lfs diff=lfs merge=lfs -text
43
- # Image files - uncompressed
44
- *.bmp filter=lfs diff=lfs merge=lfs -text
45
- *.gif filter=lfs diff=lfs merge=lfs -text
46
- *.png filter=lfs diff=lfs merge=lfs -text
47
- *.tiff filter=lfs diff=lfs merge=lfs -text
48
- # Image files - compressed
49
- *.jpg filter=lfs diff=lfs merge=lfs -text
50
- *.jpeg filter=lfs diff=lfs merge=lfs -text
51
- *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  *.zst filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
KoPI-NLLB.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Cleaned Indonesian split of the KoPI corpus."""
16
+ import json
17
+ import glob
18
+ import gzip
19
+ import textwrap
20
+ import datasets
21
+ import zstandard as zstd
22
+ logger = datasets.logging.get_logger(__name__)
23
+
24
+ _CITATION = """
25
+ """
26
+ _DESCRIPTION = """\
27
+ """
28
+ _HOMEPAGE = "https://huggingface.co/datasets/munggok/KoPI"
29
+ _LICENSE = "CC0"
30
+ _BASE_URL = {
31
+ "train":"https://huggingface.co/datasets/munggok/KoPI/resolve/main/raw/kopi-{index:012d}.json.zst",
32
+ "val":"https://huggingface.co/datasets/munggok/KoPI/resolve/main/raw/kopi-val-{index:012d}.json.zst"
33
+
34
+ }
35
+ _CONFIGS = {
36
+ "tiny": {"train": 10, "validation": 1},
37
+ "small": {"train": 30, "validation": 2},
38
+ "medium": {"train": 55, "validation": 2},
39
+ "large": {"train": 75, "validation": 3},
40
+ "full": {"train": 107, "validation": 4}
41
+ }
42
class KoPIConfig(datasets.BuilderConfig):
    """BuilderConfig for the Clean KoPI corpus."""

    def __init__(self, **kwargs):
        """Create a config for the Clean KoPI corpus.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
50
class KoPI(datasets.GeneratorBasedBuilder):
    """KoPI corpus builder.

    Downloads zstd-compressed JSON-lines shards from the Hugging Face Hub
    and yields one example per line with ``text``, ``url``, ``timestamp``
    and ``meta`` fields.
    """

    BUILDER_CONFIGS = [
        KoPIConfig(
            name="tiny",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                """\
                Tiny version only using 10 shard
                """
            ),
        ),
        KoPIConfig(
            name="small",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                """\
                small version only using 30 shard
                """
            ),
        ),
        KoPIConfig(
            name="medium",
            version=datasets.Version("1.0.0"),
            # Fixed: previously said "medion ... 50 shard" while the medium
            # config actually consumes 55 shards (see _CONFIGS).
            description=textwrap.dedent(
                """\
                medium version only using 55 shard
                """
            ),
        ),
        KoPIConfig(
            name="large",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                """\
                large version only using 75 shard
                """
            ),
        ),
        KoPIConfig(
            name="full",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                """\
                The full cleaned version of KoPI corpus.
                Estimated size of compressed files: 53GB
                """
            ),
        ),
    ]

    def _info(self):
        """Return dataset metadata: features, homepage, license, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "meta": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the shards selected by the active config and define splits."""
        counts = _CONFIGS[self.config.name]
        # Train shards are numbered 1..107; validation shards continue at 108..111.
        train = [_BASE_URL["train"].format(index=k + 1) for k in range(counts["train"])]
        validation = [_BASE_URL["val"].format(index=k + 108) for k in range(counts["validation"])]
        train_downloaded_files = dl_manager.download(train)
        validation_downloaded_files = dl_manager.download(validation)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(id, example)`` pairs by streaming every shard line by line."""
        id_ = 0
        for filepath in filepaths:
            logger.info("Generating examples from %s", filepath)
            # Stream-decompress the zstd shard as UTF-8 text, one JSON doc per line.
            with zstd.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if not line:
                        continue
                    example = json.loads(line)
                    # Missing/None metadata is normalized to the literal string
                    # "None" so the `meta` feature is always a string.
                    meta = example["meta"] if example.get("meta") is not None else "None"
                    yield id_, {
                        "text": example["text"],
                        "url": example["url"],
                        "timestamp": example["timestamp"],
                        "meta": meta,
                    }
                    id_ += 1
README.md DELETED
@@ -1,3 +0,0 @@
1
- ---
2
- license: odc-by
3
- ---
 
 
 
 
raw/ace_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:edea44616f463702f54386477b5db50dd55423aec55f2ab5dc13e9b97c6249db
3
+ size 359632553
raw/ban_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1fbb054260531dffc2dba653a5a55eee1c16d6feafcff5f520d45f42253daaaf
3
+ size 140058002
raw/bjn_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3bbb5ae0ea7c3cfde4cae18f5fca9db57d87a8e77e857a435c9178069a7f5db
3
+ size 261410154
raw/ind_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b59e4a0921911961f0328a067a321dd1112cc5e7bbd5d881d5d6e5e236acd87
3
+ size 2143142647
raw/jav_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da1040a0363eef04ae3a7db175eacb14c5d82fae387e1f8575bc51069289c623
3
+ size 1915139483
raw/min_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c22afce39f640c16f11c968bec000f59b2874fe3fa96bc2284a090d721d04225
3
+ size 169301609
raw/sun_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db355f1e5712a1c9ac8ff515d30b1e1bca7ae5bc133c35035c049fe6d1f08859
3
+ size 1510544817