albertvillanova (HF staff) committed
Commit 1fa3a8e
1 Parent(s): 69a8c7b

Update source data to 1.3 version (#3)


- Update source data to 1.3 version in loading script (c0c52bb2b410876452da67cc32282c73529e1c48)
- Update metadata in dataset card (25831b3e5a69fb766310220298e42e5d69c0e57d)

Files changed (2):
  1. README.md +10 -11
  2. air_dialogue.py +7 -11
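For context, both configurations keep loading the same way after this change; only the source archive version and the exported sizes move. A minimal usage sketch, assuming the Hub id `air_dialogue` and a `datasets` release that still executes dataset scripts (newer releases may require `trust_remote_code=True`):

from datasets import load_dataset

# Dialogues with their annotations (actions, intents, correctness flag, ...).
data = load_dataset("air_dialogue", "air_dialogue_data")

# Per-dialogue flight knowledge-base entries.
kb = load_dataset("air_dialogue", "air_dialogue_kb")

print(data)  # train: 321459 examples, validation: 40363 examples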
README.md CHANGED
@@ -1,5 +1,4 @@
 ---
-pretty_name: AirDialogue
 annotations_creators:
 - crowdsourced
 language_creators:
@@ -23,7 +22,7 @@ task_ids:
 - dialogue-modeling
 - language-modeling
 - masked-language-modeling
-paperswithcode_id: null
+pretty_name: AirDialogue
 dataset_info:
 - config_name: air_dialogue_data
   features:
@@ -83,13 +82,13 @@ dataset_info:
       dtype: bool_
   splits:
   - name: train
-    num_bytes: 353721137
+    num_bytes: 353718365
     num_examples: 321459
   - name: validation
-    num_bytes: 44442238
+    num_bytes: 44441818
     num_examples: 40363
-  download_size: 272898923
-  dataset_size: 398163375
+  download_size: 303598343
+  dataset_size: 398160183
 - config_name: air_dialogue_kb
   features:
   - name: kb
@@ -124,13 +123,13 @@ dataset_info:
       dtype: int32
   splits:
   - name: train
-    num_bytes: 782592158
+    num_bytes: 782590970
     num_examples: 321459
   - name: validation
-    num_bytes: 98269789
+    num_bytes: 98269609
     num_examples: 40363
-  download_size: 272898923
-  dataset_size: 880861947
+  download_size: 303598343
+  dataset_size: 880860579
 ---
 
 # Dataset Card for air_dialogue
@@ -309,4 +308,4 @@ cc-by-nc-4.0
 
 ### Contributions
 
-Thanks to [@skyprince999](https://github.com/skyprince999) for adding this dataset.
+Thanks to [@skyprince999](https://github.com/skyprince999) for adding this dataset.
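To check that a local copy of the updated script reproduces the card numbers above, the builder metadata can be inspected without triggering the full download. A small sketch, again assuming the Hub id `air_dialogue` and a `datasets` release that still resolves loading scripts; split sizes are read from the exported dataset_info:

from datasets import load_dataset_builder

# Reads features and split sizes without downloading the ~300 MB archive.
builder = load_dataset_builder("air_dialogue", "air_dialogue_data")

print(builder.info.splits["train"].num_examples)       # expected: 321459
print(builder.info.splits["validation"].num_examples)  # expected: 40363
print(builder.info.download_size)                      # expected: 303598343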
air_dialogue.py CHANGED
@@ -56,17 +56,14 @@ _LICENSE = "cc-by-nc-4.0"
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLs = {
-    "air_dialogue_data": "https://storage.googleapis.com/airdialogue/airdialogue_data.tar.gz",
-    "air_dialogue_kb": "https://storage.googleapis.com/airdialogue/airdialogue_data.tar.gz",
-}
+_URL = "https://storage.googleapis.com/airdialogue/airdialogue_data.tar.gz"
 
 
 # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
 class AirDialogue(datasets.GeneratorBasedBuilder):
     """TODO: Short description of my dataset."""
 
-    VERSION = datasets.Version("1.1.0")
+    VERSION = datasets.Version("1.3.0")
 
     # This is an example of a dataset with multiple configurations.
     # If you don't want/need to define several sub-sets in your dataset,
@@ -183,14 +180,13 @@ class AirDialogue(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        my_urls = _URLs[self.config.name]
-        archive = dl_manager.download(my_urls)
+        archive = dl_manager.download(_URL)
         if self.config.name == "air_dialogue_data":
-            train = "airdialogue_data/airdialogue/train_data.json"
-            dev = "airdialogue_data/airdialogue/dev_data.json"
+            train = "airdialogue_data_1.3/airdialogue/train_data.json"
+            dev = "airdialogue_data_1.3/airdialogue/dev_data.json"
         else:
-            train = "airdialogue_data/airdialogue/train_kb.json"
-            dev = "airdialogue_data/airdialogue/dev_kb.json"
+            train = "airdialogue_data_1.3/airdialogue/train_kb.json"
+            dev = "airdialogue_data_1.3/airdialogue/dev_kb.json"
 
         return [
             datasets.SplitGenerator(
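The hunk is cut off at the start of the SplitGenerator list, but since the tarball is only downloaded rather than extracted, the generators presumably hand both the member path and an archive iterator to `_generate_examples`. A hedged sketch of the continuation, assuming the standard `dl_manager.iter_archive` pattern and hypothetical `gen_kwargs` names not shown in this diff:

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # "filepath" is the JSON member inside the tar; "files" yields
                # (path, file object) pairs so _generate_examples can locate it.
                gen_kwargs={"filepath": train, "files": dl_manager.iter_archive(archive)},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": dev, "files": dl_manager.iter_archive(archive)},
            ),
        ]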