acul3 committed
Commit 7404253
1 Parent(s): 721240e

add dedup file

KoPI-NLLB.py CHANGED
@@ -25,21 +25,24 @@ _CITATION = """
 """
 _DESCRIPTION = """\
 """
-_HOMEPAGE = "https://huggingface.co/datasets/munggok/KoPI"
-_LICENSE = "CC0"
-_BASE_URL = {
-    "train": "https://huggingface.co/datasets/munggok/KoPI/resolve/main/raw/kopi-{index:012d}.json.zst",
-    "val": "https://huggingface.co/datasets/munggok/KoPI/resolve/main/raw/kopi-val-{index:012d}.json.zst"
+_TYPE = ['raw', 'dedup', 'neardup']
+_CONF_LANG = ['ace_Latn', 'ban_Latn', 'bjn_Latn', 'ind_Latn', 'jav_Latn', 'min_Latn', 'sun_Latn']
+_CONFIGS = []
+for j in _CONF_LANG:
+    for m in _TYPE:
+        _CONFIGS.append(j + '-' + m)
+_ALL_CONFIG = ["all-raw", "all-dedup", "all-neardup"] + _CONFIGS
+_HOMEPAGE = "https://huggingface.co/datasets/munggok/KoPI-NLLB"
+_LICENSE = "ODC_C"
+_BASE_URL = "https://huggingface.co/datasets/munggok/KoPI-NLLB/resolve/main/{tipe}/{lang}.json.zst"
 
-}
-_CONFIGS = {
-    "tiny": {"train": 10, "validation": 1},
-    "small": {"train": 30, "validation": 2},
-    "medium": {"train": 55, "validation": 2},
-    "large": {"train": 75, "validation": 3},
-    "full": {"train": 107, "validation": 4}
-}
-class KoPIConfig(datasets.BuilderConfig):
+def kopi_nllb_constructor(nam):
+    return KoPINLLBConfig(
+        name=nam,
+        version=datasets.Version("1.0.0"),
+    )
+
+class KoPINLLBConfig(datasets.BuilderConfig):
     """BuilderConfig for the Clean KoPI corpus."""
     def __init__(self, **kwargs):
         """BuilderConfig for Clean KoPI corpus.
@@ -47,56 +50,10 @@ class KoPIConfig(datasets.BuilderConfig):
         **kwargs: keyword arguments forwarded to super.
         """
         super().__init__(**kwargs)
-class KoPI(datasets.GeneratorBasedBuilder):
+class KoPINLLB(datasets.GeneratorBasedBuilder):
     """KoPI corpus."""
-    BUILDER_CONFIGS = [
-        KoPIConfig(
-            name="tiny",
-            version=datasets.Version("1.0.0"),
-            description=textwrap.dedent(
-                f"""\
-                Tiny version only using 10 shard
-                """
-            )
-        ),
-        KoPIConfig(
-            name="small",
-            version=datasets.Version("1.0.0"),
-            description=textwrap.dedent(
-                f"""\
-                small version only using 30 shard
-                """
-            )
-        ),
-        KoPIConfig(
-            name="medium",
-            version=datasets.Version("1.0.0"),
-            description=textwrap.dedent(
-                f"""\
-                medion version only using 50 shard
-                """
-            )
-        ),
-        KoPIConfig(
-            name="large",
-            version=datasets.Version("1.0.0"),
-            description=textwrap.dedent(
-                f"""\
-                large version only using 75 shard
-                """
-            )
-        ),
-        KoPIConfig(
-            name="full",
-            version=datasets.Version("1.0.0"),
-            description=textwrap.dedent(
-                f"""\
-                The full cleaned version of KoPI corpus.
-                Estimated size of compressed files: 53GB
-                """
-            )
-        )
-    ]
+    BUILDER_CONFIGS = [kopi_nllb_constructor(m) for m in _ALL_CONFIG]
+
     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
@@ -104,8 +61,8 @@ class KoPI(datasets.GeneratorBasedBuilder):
             {
                 "text": datasets.Value("string"),
                 "url": datasets.Value("string"),
-                "timestamp": datasets.Value("string"),
-                "meta": datasets.Value("string"),
+                "score": datasets.Value("float32"),
+                "source": datasets.Value("string"),
             }
         ),
         supervised_keys=None,
@@ -114,15 +71,14 @@ class KoPI(datasets.GeneratorBasedBuilder):
         citation=_CITATION,
     )
     def _split_generators(self, dl_manager):
-        train = [_BASE_URL["train"].format(index=k + 1) for k in range(107)][0:_CONFIGS[self.config.name]['train']]
-        validation = [_BASE_URL["val"].format(index=k + 108) for k in range(4)][0:_CONFIGS[self.config.name]['validation']]
+        name = self.config.name.split("-")
+        if name[0] == "all":
+            train = [_BASE_URL.format(tipe=name[1], lang=m) for m in _CONF_LANG]
+        else:
+            train = [_BASE_URL.format(tipe=name[1], lang=name[0])]
         train_downloaded_files = dl_manager.download(train)
-        validation_downloaded_files = dl_manager.download(validation)
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
-            ),
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files})
         ]
     def _generate_examples(self, filepaths):
         """This function returns the examples in the raw (text) form by iterating on all the files."""
Readme.md ADDED
@@ -0,0 +1,5 @@
+KoPI (Korpus Perayapan Indonesia)-NLLB is the Indonesian language family portion (Acehnese, Balinese, Banjarese, Indonesian, Javanese, Minangkabau, Sundanese) extracted from the NLLB dataset, [allenai/nllb](https://huggingface.co/datasets/allenai/nllb).
+
+Each language set is additionally filtered with deduplication techniques: exact-hash (md5) dedup and MinHash LSH neardup.
+
+Details soon.
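
No dedup code ships with this commit, so the following is only a sketch of the two techniques the README names: exact-hash dedup via the standard library's md5, and near-deduplication with MinHash LSH via the `datasketch` package. The tokenization, threshold, and permutation count are illustrative assumptions, not the settings used to build this dataset:

    import hashlib
    from datasketch import MinHash, MinHashLSH  # assumed tooling, not part of this repo

    def exact_dedup(docs):
        """Keep only the first occurrence of each distinct md5(text) digest."""
        seen = set()
        for doc in docs:
            digest = hashlib.md5(doc["text"].encode("utf-8")).hexdigest()
            if digest not in seen:
                seen.add(digest)
                yield doc

    def near_dedup(docs, threshold=0.8, num_perm=128):  # illustrative settings
        """Drop documents whose MinHash collides with one already indexed."""
        lsh = MinHashLSH(threshold=threshold, num_perm=num_perm)
        for i, doc in enumerate(docs):
            m = MinHash(num_perm=num_perm)
            for token in doc["text"].split():  # naive whitespace shingles
                m.update(token.encode("utf-8"))
            if not lsh.query(m):
                lsh.insert(str(i), m)
                yield doc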
dedup/ace_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c814775364f1d4e938484d92a1c29ed491817d6427d4ab1444bdcd6887f7e101
+size 92988815
dedup/ban_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:170b5e584a26160a6205c404abdaeb0e4edb9364a5c979b3801465f44f98989b
+size 31438703
dedup/bjn_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:84b4d267a848f657f90f2d4f16bc74d0300cb1bc49730854977d2031cad60b2d
+size 42942502
dedup/ind_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8dac1cfc113775aa6df3add1d1c50ed741bc7394b3dcc58d3edf85ea8167248
+size 1110105292
dedup/jav_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:286b3513da3bfac7fce06d9dd71738800f8a294c6bfb3af28a0784d65d6601d9
+size 331583433
dedup/min_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05a9cc6188236fd91d6ae9cb40a4031806e0534bd1237771b19a1d1bfc897f01
+size 9172846
dedup/sun_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad6c197b60beaae5bd17fd474e0838993d9d3b77e5747aedb1988de9682a5c57
+size 266055937
neardup/ace_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f17b6979a85a6cc35478c39bee2526b5c8337280c97701f14d10c5558ee67d4
+size 68967151
neardup/ban_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78be04a42856b1fa01d513428dcc249bbe0cfe424835853d70669999020b004f
+size 23045995
neardup/bjn_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:336e168137867e2c99aefbba20965c32d6747288a1df37bfe9479e209870bd78
+size 27643266
neardup/ind_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30eddc2b9258edd0e8f42b6a4f73e144aa6f05cee5da26dcc0fee97731008c42
+size 858965669
neardup/jav_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64011f74b82b25d950d9b7110c04e3c0f3b412bd87b1b2c90f7ca9fd277decf3
+size 94742689
neardup/min_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34420198dddb6639c85e1ce52930b6f6c3265ed47b295945e5586797e2d2e4fc
+size 5962240
neardup/sun_Latn.json.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7bdc26acf07cccea969fe7054c6274afad98eed3c3d9d6529a8305ba6dc09f1
+size 103281725
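
Each shard above is stored as a Git LFS pointer: three lines giving the spec version, the sha256 oid, and the byte size of the actual object. A downloaded shard can be checked against its pointer with the standard library alone; the sketch below reuses the real values from the `dedup/ace_Latn.json.zst` pointer:

    import hashlib
    import os

    def verify_lfs_object(path, expected_sha256, expected_size):
        """Return True if the file matches its LFS pointer's size and oid."""
        if os.path.getsize(path) != expected_size:
            return False
        h = hashlib.sha256()
        with open(path, "rb") as f:
            # Hash in 1 MiB chunks to keep memory flat on multi-GB shards.
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest() == expected_sha256

    ok = verify_lfs_object(
        "dedup/ace_Latn.json.zst",
        "c814775364f1d4e938484d92a1c29ed491817d6427d4ab1444bdcd6887f7e101",
        92988815,
    )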