nurik040404 committed on
Commit
82c059c
1 Parent(s): 9d969d4

Update mse.py

Files changed (1)
  1. mse.py +82 -1
mse.py CHANGED
@@ -33,4 +33,85 @@ features = Features({
             "author_rep": Value("string"),
         }),
     }),
-})
+ })
+
+# coding=utf-8
+"""The dataset is a collection of questions and answers automatically extracted from the Stack Exchange community network."""
+
+
+import csv
+import io
+import json
+import os
+import zstandard
+
+import datasets
+
+_DESCRIPTION = """\
+Questions and answers automatically extracted from the Stack Exchange community network.
+"""
+
+_HOMEPAGE = "https://huggingface.co/datasets/nurik040404/mse"
+_URL = "dataset.jsonl.zst"
+
+class StackExchange(datasets.GeneratorBasedBuilder):
+    """Questions and answers automatically extracted from the Math Stack Exchange community."""
+
+    VERSION = datasets.Version("1.0.0")
+
+    # `GeneratorBasedBuilder` expects its configurations as a list under `BUILDER_CONFIGS`.
+    BUILDER_CONFIGS = [datasets.BuilderConfig(name=_URL)]
+
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # This defines the different columns of the dataset and their types;
+            # `features` is defined above, at module level.
+            features=features,
+            # If there's a common (input, target) tuple from the features,
+            # specify them here. They'll be used if as_supervised=True in
+            # builder.as_dataset.
+            supervised_keys=None,
+            # Homepage of the dataset for documentation
+            homepage=_HOMEPAGE,
+            # License for the dataset if available
+            # license=_LICENSE,
+            # Citation for the dataset
+            # citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
+        # It can accept any type or nested list/dict and will give back the same structure with each URL
+        # replaced by a path to the local file.
+        # The archive is decompressed manually in `_generate_examples`, so plain `download` is used here;
+        # passing the bare URL (rather than a one-element list) returns a single path instead of a list.
+        data_file = dl_manager.download(_URL)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": data_file,
+                },
+            )
+        ]
+
+    def _generate_examples(
+        self, filepath  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    ):
+        """Yields examples as (key, example) tuples."""
+        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+        # The `key` is here for legacy reasons (tfds) and is not important in itself.
+
+        # The archive is a zstandard-compressed JSON Lines file: open it in binary mode,
+        # wrap it in a streaming decompressor, and decode the stream as UTF-8 text so it
+        # can be iterated line by line without loading the whole file into memory.
+        with open(filepath, "rb") as f:
+            dctx = zstandard.ZstdDecompressor()
+            with dctx.stream_reader(f) as reader:
+                text = io.TextIOWrapper(reader, encoding="utf-8")
+                for id_, row in enumerate(text):
+                    yield id_, json.loads(row)
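
A minimal usage sketch, assuming the script and dataset.jsonl.zst are published in the
repository named above (recent versions of the datasets library may additionally require
trust_remote_code=True when loading script-based datasets):

    from datasets import load_dataset

    # Downloads the zstd-compressed JSON Lines archive and yields one example per line.
    ds = load_dataset("nurik040404/mse", split="train")
    print(ds[0])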