Datasets: silver/lccc

Languages:
Chinese
Multilinguality:
monolingual
Size Categories:
10M<n<100M
Language Creators:
other
Annotations Creators:
other
Source Datasets:
original
ArXiv:
License: MIT
silver committed on
Commit 77110bb
1 Parent(s): 1567450

update script

Files changed (1)
  1. lccc.py +5 -47
lccc.py CHANGED
@@ -47,10 +47,6 @@ grammatically incorrect sentences, and incoherent conversations are filtered.
 
 _HOMEPAGE = "https://github.com/thu-coai/CDial-GPT"
 _LICENSE = "MIT"
-
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
     "large": "https://huggingface.co/datasets/silver/lccc/resolve/main/lccc_large.jsonl.gz",
     "base": {
@@ -61,32 +57,17 @@ _URLS = {
 }
 
 
-# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
-class NewDataset(datasets.GeneratorBasedBuilder):
+class LCCC(datasets.GeneratorBasedBuilder):
     """Large-scale Cleaned Chinese Conversation corpus."""
 
     VERSION = datasets.Version("1.0.0")
 
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name="large", version=VERSION, description="The large version of LCCC"),
         datasets.BuilderConfig(name="base", version=VERSION, description="The base version of LCCC"),
     ]
 
-    # DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it make sense.
-
     def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
         features = datasets.Features(
             {
                 "dialog": datasets.Value("string"),
@@ -109,56 +90,33 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = _URLS[self.config.name]
         downloaded_data = dl_manager.download_and_extract(urls)
         if self.config.name == "large":
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
-                    gen_kwargs={
-                        "filepath": os.path.join(downloaded_data),
-                        "split": "train",
-                    }
+                    gen_kwargs={ "filepath": os.path.join(downloaded_data), "split": "train", }
                 )
             ]
         if self.config.name == "base":
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={
-                        "filepath": os.path.join(downloaded_data["train"]),
-                        "split": "train",
-                    },
+                    gen_kwargs={ "filepath": os.path.join(downloaded_data["train"]), "split": "train", },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={
-                        "filepath": os.path.join(downloaded_data["train"]),
-                        "split": "test"
-                    },
+                    gen_kwargs={ "filepath": os.path.join(downloaded_data["test"]), "split": "test" },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={
-                        "filepath": os.path.join(downloaded_data["valid"]),
-                        "split": "dev",
-                    },
+                    gen_kwargs={ "filepath": os.path.join(downloaded_data["valid"]), "split": "dev", },
                 ),
             ]
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepath, split):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                row = row.strip()
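
As a usage note (an illustrative sketch, not part of the commit): the repo id "silver/lccc" comes from the download URLs above, the config names "large" and "base" from BUILDER_CONFIGS, and the split layout from _split_generators.

    from datasets import load_dataset

    # "base" defines train/validation/test splits; "large" exposes only train.
    lccc = load_dataset("silver/lccc", "base")

    # Per _info, each example carries a single string feature named "dialog".
    print(lccc["train"][0]["dialog"])

Besides stripping the template TODOs and renaming NewDataset to LCCC, the commit repoints the test split's filepath from downloaded_data["train"] to downloaded_data["test"], so the test split loaded above now reads the correct file.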