Update README.md
---
license: cc
---

### Dataset Summary

KoPI-CC (Korpus Perayapan Indonesia - CC) is an Indonesian-only extract of Common Crawl snapshots. Each snapshot is extracted with the ungoliant pipeline and then given an extra "filtering" pass using a deduplication step, sketched below.
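
This card does not spell out the deduplication method, so the following is only a minimal sketch of what such a filter can look like, assuming exact-match hashing over document text (the real pipeline may well use a fuzzier technique such as MinHash):

```python
import hashlib

def dedup_exact(docs):
    """Yield documents whose normalized text has not been seen before.

    Illustrative only: the actual KoPI-CC deduplication step is not
    documented here and may differ.
    """
    seen = set()
    for doc in docs:
        digest = hashlib.sha1(doc["text"].strip().encode("utf-8")).hexdigest()
        if digest not in seen:
            seen.add(digest)
            yield doc

# Two identical documents collapse to one.
docs = [{"text": "Halo dunia"}, {"text": "Halo dunia"}, {"text": "Apa kabar?"}]
print(len(list(dedup_exact(docs))))  # -> 2
```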

More details coming soon.
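
Until then, the snapshots should be loadable through the Hugging Face `datasets` library in the usual way. A hedged sketch: the repository id, config name, and `text` field below are placeholders following OSCAR/ungoliant conventions, not values confirmed by this card:

```python
from datasets import load_dataset

# "KoPI-CC" and "2021_10" are placeholder values: substitute the actual
# hub repository id and Common Crawl snapshot config once published.
kopi = load_dataset("KoPI-CC", "2021_10", split="train", streaming=True)

# Stream a few documents without downloading a whole snapshot; the
# "text" field name follows the OSCAR/ungoliant convention and is an
# assumption here.
for doc in kopi.take(3):
    print(doc["text"][:200])
```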

### Citation Information

```
@ARTICLE{2022arXiv220106642A,
       author = {{Abadji}, Julien and {Ortiz Suarez}, Pedro and {Romary}, Laurent and {Sagot}, Beno{\^\i}t},
        title = "{Towards a Cleaner Document-Oriented Multilingual Crawled Corpus}",
      journal = {arXiv e-prints},
     keywords = {Computer Science - Computation and Language},
         year = 2022,
        month = jan,
          eid = {arXiv:2201.06642},
        pages = {arXiv:2201.06642},
archivePrefix = {arXiv},
       eprint = {2201.06642},
 primaryClass = {cs.CL},
       adsurl = {https://ui.adsabs.harvard.edu/abs/2022arXiv220106642A},
      adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}

@inproceedings{AbadjiOrtizSuarezRomaryetal.2021,
  author    = {Julien Abadji and Pedro Javier Ortiz Su{\'a}rez and Laurent Romary and Beno{\^i}t Sagot},
  title     = {Ungoliant: An optimized pipeline for the generation of a very large-scale multilingual web corpus},
  series    = {Proceedings of the Workshop on Challenges in the Management of Large Corpora (CMLC-9) 2021. Limerick, 12 July 2021 (Online-Event)},
  editor    = {Harald L{\"u}ngen and Marc Kupietz and Piotr Bański and Adrien Barbaresi and Simon Clematide and Ines Pisetta},
  publisher = {Leibniz-Institut f{\"u}r Deutsche Sprache},
  address   = {Mannheim},
  doi       = {10.14618/ids-pub-10468},
  url       = {https://nbn-resolving.org/urn:nbn:de:bsz:mh39-104688},
  pages     = {1 -- 9},
  year      = {2021},
  abstract  = {Since the introduction of large language models in Natural Language Processing, large raw corpora have played a crucial role in Computational Linguistics. However, most of these large raw corpora are either available only for English or not available to the general public due to copyright issues. Nevertheless, there are some examples of freely available multilingual corpora for training Deep Learning NLP models, such as the OSCAR and Paracrawl corpora. However, they have quality issues, especially for low-resource languages. Moreover, recreating or updating these corpora is very complex. In this work, we try to reproduce and improve the goclassy pipeline used to create the OSCAR corpus. We propose a new pipeline that is faster, modular, parameterizable, and well documented. We use it to create a corpus similar to OSCAR but larger and based on recent data. Also, unlike OSCAR, the metadata information is at the document level. We release our pipeline under an open source license and publish the corpus under a research-only license.},
  language  = {en}
}
```