Datasets:

Languages:
Indonesian
Multilinguality:
monolingual
Language Creators:
found
Annotations Creators:
no-annotation
Source Datasets:
original
ArXiv:
License:
acul commited on
Commit
042efab
1 Parent(s): dffff1e

add 2021_43

Browse files
2021_39/raw/2021_39_raw.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cleaned Indonesian split of the mC4 corpus (2021_39 Common Crawl snapshot)."""
import json
import glob
import gzip  # NOTE(review): unused in this script; kept to avoid breaking unseen tooling.
import textwrap

import datasets
import zstandard as zstd

logger = datasets.logging.get_logger(__name__)

# Deterministic (sorted) list of locally staged zstd-compressed JSONL shards.
# NOTE(review): hard-coded absolute path — this script only runs on the host
# where the shards were staged; confirm before reuse elsewhere.
file = sorted(glob.glob('/data/KoPI-CC/2021_39/raw/*.zst'))

_CITATION = """
@article{JMLR:v21:20-074,
author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
journal = {Journal of Machine Learning Research},
year = {2020},
volume = {21},
number = {140},
pages = {1-67},
url = {http://jmlr.org/papers/v21/20-074.html}
}
"""

# Fixed copy-paste error from the mc4-it script: this dataset is the
# Indonesian (id) split, not Italian.
_DESCRIPTION = """\
A thoroughly cleaned version of the Indonesian portion of the multilingual
colossal, cleaned version of Common Crawl's web crawl corpus (mC4) by AllenAI.
Based on Common Crawl dataset: "https://commoncrawl.org".
This is the processed version of Google's mC4 dataset by AllenAI, with further cleaning
detailed in the repository README file.
"""

_HOMEPAGE = "https://github.com/allenai/allennlp/discussions/5056"
_LICENSE = "Open Data Commons Attribution License (ODC-By) v1.0"
_BASE_URL = "https://huggingface.co/datasets/munggok/mc4-id/resolve/main/mc4-id-filter/c4-id{split_suffix}.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"

# Shard counts per named configuration.
# NOTE(review): only the "full" config is registered below; the generator
# reads the local `file` list, so these counts are currently informational.
_CONFIGS = {
    "tiny": {"train": 100, "validation": 1},
    "small": {"train": 250, "validation": 2},
    "medium": {"train": 500, "validation": 4},
    "large": {"train": 750, "validation": 6},
    "full": {"train": 1016, "validation": 8}
}
class OscarConfig(datasets.BuilderConfig):
    """Builder configuration for this KoPI-CC Common Crawl dataset script."""

    def __init__(self, **kwargs):
        """Forward every keyword argument (name, version, description, ...)
        unchanged to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
class Oscar(datasets.GeneratorBasedBuilder):
    """mC4, a colossal, cleaned version of Common Crawl's web crawl corpus."""

    BUILDER_CONFIGS = [
        OscarConfig(
            name="full",
            version=datasets.Version("1.0.0"),
            # Fixed copy-paste error: this is the Indonesian split, not Italian.
            # (Dropped the needless f-prefix: the string has no placeholders.)
            description=textwrap.dedent(
                """\
                The full cleaned version of the Indonesian portion of the multilingual C4 corpus.
                Estimated size of compressed files: 103GB
                """
            )
        )
    ]

    def _info(self):
        """Return dataset metadata: four flat string features plus card info."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    # Remaining OSCAR-style metadata, serialized as a JSON string.
                    "meta": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Expose a single TRAIN split over the locally staged .zst shards.

        ``dl_manager`` is intentionally unused: shards come from the
        module-level ``file`` list instead of being downloaded.
        (Removed the unused ``data_urls`` local and the pointless
        ``train_downloaded_files`` alias.)
        """
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": file}),
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(id, example)`` pairs by streaming each zstd-compressed JSONL shard."""
        id_ = 0
        for filepath in filepaths:
            logger.info(f"Generating examples from {filepath}")
            # zstd.open wraps the binary handle in a text stream, so the shard
            # is decompressed lazily while we iterate line by line.
            with zstd.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        meta = dict()
                        meta["warc_headers"] = example["warc_headers"]
                        # Ensure the key is always present, even when the crawler
                        # did not identify a content language (value becomes None).
                        meta["warc_headers"]["warc-identified-content-language"] = example[
                            "warc_headers"
                        ].get("warc-identified-content-language")
                        meta["identification"] = example["metadata"]["identification"]
                        meta["annotations"] = example["metadata"]["annotation"]
                        meta["line_identifications"] = example["metadata"][
                            "sentence_identifications"
                        ]
                        yield id_, {
                            "text": example["content"],
                            "url": example["warc_headers"]["warc-target-uri"],
                            "timestamp": example["warc_headers"]["warc-date"],
                            "meta": json.dumps(meta),
                        }
                        id_ += 1
2021_43/raw/2021_43_raw.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cleaned Indonesian split of the mC4 corpus (2021_43 Common Crawl snapshot)."""
import json
import glob
import gzip  # NOTE(review): unused in this script; kept to avoid breaking unseen tooling.
import textwrap

import datasets
import zstandard as zstd

logger = datasets.logging.get_logger(__name__)

# Deterministic (sorted) list of locally staged zstd-compressed JSONL shards.
# NOTE(review): hard-coded absolute path — this script only runs on the host
# where the shards were staged; confirm before reuse elsewhere.
file = sorted(glob.glob('/data/KoPI-CC/2021_43/raw/*.zst'))

_CITATION = """
@article{JMLR:v21:20-074,
author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
journal = {Journal of Machine Learning Research},
year = {2020},
volume = {21},
number = {140},
pages = {1-67},
url = {http://jmlr.org/papers/v21/20-074.html}
}
"""

# Fixed copy-paste error from the mc4-it script: this dataset is the
# Indonesian (id) split, not Italian.
_DESCRIPTION = """\
A thoroughly cleaned version of the Indonesian portion of the multilingual
colossal, cleaned version of Common Crawl's web crawl corpus (mC4) by AllenAI.
Based on Common Crawl dataset: "https://commoncrawl.org".
This is the processed version of Google's mC4 dataset by AllenAI, with further cleaning
detailed in the repository README file.
"""

_HOMEPAGE = "https://github.com/allenai/allennlp/discussions/5056"
_LICENSE = "Open Data Commons Attribution License (ODC-By) v1.0"
_BASE_URL = "https://huggingface.co/datasets/munggok/mc4-id/resolve/main/mc4-id-filter/c4-id{split_suffix}.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"

# Shard counts per named configuration.
# NOTE(review): only the "full" config is registered below; the generator
# reads the local `file` list, so these counts are currently informational.
_CONFIGS = {
    "tiny": {"train": 100, "validation": 1},
    "small": {"train": 250, "validation": 2},
    "medium": {"train": 500, "validation": 4},
    "large": {"train": 750, "validation": 6},
    "full": {"train": 1016, "validation": 8}
}
class OscarConfig(datasets.BuilderConfig):
    """Builder configuration for this KoPI-CC Common Crawl dataset script."""

    def __init__(self, **kwargs):
        """Forward every keyword argument (name, version, description, ...)
        unchanged to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
class Oscar(datasets.GeneratorBasedBuilder):
    """mC4, a colossal, cleaned version of Common Crawl's web crawl corpus."""

    BUILDER_CONFIGS = [
        OscarConfig(
            name="full",
            version=datasets.Version("1.0.0"),
            # Fixed copy-paste error: this is the Indonesian split, not Italian.
            # (Dropped the needless f-prefix: the string has no placeholders.)
            description=textwrap.dedent(
                """\
                The full cleaned version of the Indonesian portion of the multilingual C4 corpus.
                Estimated size of compressed files: 103GB
                """
            )
        )
    ]

    def _info(self):
        """Return dataset metadata: four flat string features plus card info."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    # Remaining OSCAR-style metadata, serialized as a JSON string.
                    "meta": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Expose a single TRAIN split over the locally staged .zst shards.

        ``dl_manager`` is intentionally unused: shards come from the
        module-level ``file`` list instead of being downloaded.
        (Removed the unused ``data_urls`` local and the pointless
        ``train_downloaded_files`` alias.)
        """
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": file}),
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(id, example)`` pairs by streaming each zstd-compressed JSONL shard."""
        id_ = 0
        for filepath in filepaths:
            logger.info(f"Generating examples from {filepath}")
            # zstd.open wraps the binary handle in a text stream, so the shard
            # is decompressed lazily while we iterate line by line.
            with zstd.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        meta = dict()
                        meta["warc_headers"] = example["warc_headers"]
                        # Ensure the key is always present, even when the crawler
                        # did not identify a content language (value becomes None).
                        meta["warc_headers"]["warc-identified-content-language"] = example[
                            "warc_headers"
                        ].get("warc-identified-content-language")
                        meta["identification"] = example["metadata"]["identification"]
                        meta["annotations"] = example["metadata"]["annotation"]
                        meta["line_identifications"] = example["metadata"][
                            "sentence_identifications"
                        ]
                        yield id_, {
                            "text": example["content"],
                            "url": example["warc_headers"]["warc-target-uri"],
                            "timestamp": example["warc_headers"]["warc-date"],
                            "meta": json.dumps(meta),
                        }
                        id_ += 1
2021_43/raw/id_meta_0.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b7da10f810a75619e169e386cd0439a82944e03f163f41a0c0cf9cbce15ea99
3
+ size 216039803
2021_43/raw/id_meta_1.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91bc96d1d7b86b256dff6308907a2f7066682096304f726628c0490fa1fafc8b
3
+ size 221156078
2021_43/raw/id_meta_10.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31cbee58a5490616078cd775d441b11ad31164ca840bc3b7836ad2b076cbdeb3
3
+ size 224980292
2021_43/raw/id_meta_11.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:808b49f810176c78b919816ba3b35a0b5291b480d0b19065d30607e5e74e816c
3
+ size 245037996
2021_43/raw/id_meta_12.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:673fd7d893bdf8643f5c0e086a86aadae58be7aefeb56f37241eb899722d8144
3
+ size 217054896
2021_43/raw/id_meta_13.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78decd076931966dfce7e3b034beabeadaf7238834135e04cb6773b80118b1c2
3
+ size 235449131
2021_43/raw/id_meta_14.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:70de5b719bc62c61a0c39ec560a2a9789fee34027135a46a893d5bdd7fb70d6c
3
+ size 236134405
2021_43/raw/id_meta_15.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2d2b738ec0a238c45f7ee22168bfc9152134bd3cd4c340f5f60c4bb0a2d5961
3
+ size 226982137
2021_43/raw/id_meta_16.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a3322e114a2188d898fea02444c077adc5101c3887b5e653364c171f98c1994
3
+ size 207666945
2021_43/raw/id_meta_17.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38c95391cf2279638e29fbb281fb1b39e4f81ec8402a9e6668e0a88ecc7f8d55
3
+ size 231178903
2021_43/raw/id_meta_18.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b4531dc643836b03d490f03dfe016c835b4154d1e19a1897230c6baef77da8b
3
+ size 219346451
2021_43/raw/id_meta_19.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91462d0e97e443132476d446de50542ca170c28234b6dfe29a69a1caa3b6e0c5
3
+ size 221209959
2021_43/raw/id_meta_2.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a162ce5f21547e946fecbdc30a245198130f89201dc83bf2827683394a4863ce
3
+ size 214267502
2021_43/raw/id_meta_20.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3ff391b04b523fb1680538f9a67529184248245726009c5e36cb6c747502e28
3
+ size 211595064
2021_43/raw/id_meta_21.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3da3dbcca1dade6fab7fb9b3ead7a1419a4de45dc7f301d5f10adabe01c92746
3
+ size 229849969
2021_43/raw/id_meta_22.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c9d25e2298779e903aed944f29c8a64512a316c4d8f4bfa46c0d9a35755ef90
3
+ size 225686520
2021_43/raw/id_meta_23.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec3bdebd3fb1d96c1d20fcac289504b5bd85026ed634433ed6575771b44bb157
3
+ size 243511363
2021_43/raw/id_meta_24.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965cc16e72158a8378e7fa3e227d30db16edf2d795d19b05ce3d70f5487c4fa4
3
+ size 231492953
2021_43/raw/id_meta_25.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5588ae1cada9b8b50ff6a077819c4f85c6fc56f6a2075b85dcd549fb34697b54
3
+ size 234309813
2021_43/raw/id_meta_26.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e311febdf815e65a2519be7152c7da2e9d22d67c73caca0891e48846507dcd6
3
+ size 190426726
2021_43/raw/id_meta_27.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3aa80c3d9d14d0e54d130b737415934bfeb931e852cf789124cb3130f75eb20
3
+ size 203695712
2021_43/raw/id_meta_28.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2df9a03b08b1b01a7569adf9e63f85ddc970b3738e38addca040fd0038d6e22f
3
+ size 236046797
2021_43/raw/id_meta_29.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cc441d81a357f2364a638eacc2fe888c3c52f076e4c43b5adaeea7ea8566ada9
3
+ size 218366538
2021_43/raw/id_meta_3.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13ffebd26976fdeafa3b2a005094f2502174af2a3145b8a4ca8025253cd61335
3
+ size 248084429
2021_43/raw/id_meta_30.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:096ac1064ac25555bc094351720c5580b4297fcbf3eee6e1f5b1c6eebfae90bd
3
+ size 220982008
2021_43/raw/id_meta_31.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09df17a1e45b379645a40257fb6a14f5845f261bce561aa7b9548ad84713e743
3
+ size 240241870
2021_43/raw/id_meta_32.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5a429c1315dc9b6fa4ae0c5bcb3ad7622d0c9aecc7e53908c1c09701f936aa0
3
+ size 32393788
2021_43/raw/id_meta_33.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99690951236b65cb2c13a5aef12a84dc4012785af94e47ee29fcd547d44e308a
3
+ size 44545305
2021_43/raw/id_meta_34.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:70a63a8669df3d138c3d781fad3c3f482e60ffe74b1ba1e1e19e87791b683354
3
+ size 27033451
2021_43/raw/id_meta_35.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c243aac1b6715cf42cc3510e7bfe52af6d258bda44a3e83cb2488b811ed0010
3
+ size 41590047
2021_43/raw/id_meta_4.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da24a812adca9dcc0e644908e2724a563e8638f73538c16fdf390269754fcac4
3
+ size 227280434
2021_43/raw/id_meta_5.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95fd92892a6e5678d12e7ddcb81047ce90eb3afa1fde526cff9edda85c9c7b1c
3
+ size 224084192
2021_43/raw/id_meta_6.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b566294b5e2bb8ef884162f567ae7db774123c64ab446f977ad6c2f7cf64512
3
+ size 228154536
2021_43/raw/id_meta_7.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:451aa9465c88243917ba0e8bf1b090ddfaafa1330d0421a4afa3d504ed85521e
3
+ size 228589114
2021_43/raw/id_meta_8.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aef9df81b381984fdaebbd8eec08674f08094b75600ecb65b06c8dbc3197ee1b
3
+ size 216915798
2021_43/raw/id_meta_9.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cffb60c1e3f24651748c3af75f77f0847a11a79525ffe57d472be66ba00f7ca3
3
+ size 204397509