Languages: English
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: found
Annotations Creators: crowdsourced
Source Datasets: original

Support streaming

#1
by albertvillanova - opened

Files changed (1)
  coarse_discourse.py  +4 -4
coarse_discourse.py CHANGED

@@ -2,7 +2,6 @@
 
 
 import json
-import os
 
 import datasets
 
@@ -16,7 +15,8 @@ _CITATION = """\
 _DESCRIPTION = """\
 dataset contains discourse annotation and relation on threads from reddit during 2016
 """
-_URL = "https://github.com/google-research-datasets/coarse-discourse/archive/master.zip"
+# From: https://github.com/google-research-datasets/coarse-discourse
+_URL = "https://raw.githubusercontent.com/google-research-datasets/coarse-discourse/master/coarse_discourse_dataset.json"
 
 
 class CoarseDiscourse(datasets.GeneratorBasedBuilder):
@@ -67,13 +67,13 @@ class CoarseDiscourse(datasets.GeneratorBasedBuilder):
         # TODO(coarse_discourse): Downloads the data and defines the splits
         # dl_manager is a datasets.download.DownloadManager that can be used to
         # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_URL)
+        data_path = dl_manager.download(_URL)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(dl_dir, "coarse-discourse-master", "coarse_discourse_dataset.json")
+                    "filepath": data_path,
                 },
             ),
         ]
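
As a quick sanity check (not part of this diff): `dl_manager.download()` hands `_generate_examples` a single file that the streaming backend can open lazily, whereas the previous `download_and_extract()` plus `os.path.join()` into the extracted zip could not be streamed. A minimal usage sketch, assuming the canonical `coarse_discourse` dataset name and the single `train` split defined above:

```python
from datasets import load_dataset

# Regular mode: the raw JSON file is downloaded and cached locally first.
ds = load_dataset("coarse_discourse", split="train")
print(ds)

# Streaming mode (what this PR enables): examples are yielded on the fly,
# without materializing the whole dataset in the local cache.
ds_stream = load_dataset("coarse_discourse", split="train", streaming=True)
print(next(iter(ds_stream)))
```

Before merging, the same check can presumably be pointed at this PR's branch via the `revision` argument of `load_dataset` (e.g. `revision="refs/pr/1"`), assuming the Hub exposes the PR ref that way.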