albertvillanova (HF staff) committed
Commit
3e3042d
1 Parent(s): 144fe39

Convert dataset to Parquet (#4)


- Convert dataset to Parquet (df640036109ffb1ef30763bbe7c5c8e4f5f72b98)
- Delete loading script (e90c21e8bdaa4868d95f261b5a3a70e09d17c6a3)
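With the Parquet shard and the `configs` mapping in place (see the README diff below), the dataset loads without executing any repository script. A minimal sketch, assuming the Hub dataset id `coarse_discourse`:

```python
from datasets import load_dataset

# After this commit, the Hub serves the Parquet shard directly;
# no custom loading script runs on the user's machine.
ds = load_dataset("coarse_discourse", split="train")

print(ds.num_rows)     # 116357, per the dataset_info in the README
print(ds[0]["title"])  # thread title of the first flattened post
```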

README.md CHANGED
@@ -1,15 +1,14 @@
 ---
 annotations_creators:
 - crowdsourced
-language:
-- en
 language_creators:
 - found
+language:
+- en
 license:
 - cc-by-4.0
 multilinguality:
 - monolingual
-pretty_name: Coarse Discourse
 size_categories:
 - 100K<n<1M
 source_datasets:
@@ -19,6 +18,7 @@ task_categories:
 task_ids:
 - multi-class-classification
 paperswithcode_id: coarse-discourse
+pretty_name: Coarse Discourse
 dataset_info:
   features:
   - name: title
@@ -51,10 +51,15 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 45443464
+    num_bytes: 45097556
     num_examples: 116357
-  download_size: 4636201
-  dataset_size: 45443464
+  download_size: 4256575
+  dataset_size: 45097556
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
 ---
 
 # Dataset Card for "coarse_discourse"
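For reference, the new `configs` entry above is what tells `datasets` where the split lives. A sketch of the roughly equivalent explicit call, using the same glob the YAML declares (run from a local checkout of the repository):

```python
from datasets import load_dataset

# Mirrors the default config declared in the YAML header:
# the "train" split resolves to the data/train-* Parquet shards.
ds = load_dataset(
    "parquet",
    data_files={"train": "data/train-*"},
    split="train",
)
```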
coarse_discourse.py DELETED
@@ -1,116 +0,0 @@
-"""TODO(coarse_discourse): Add a description here."""
-
-
-import json
-
-import datasets
-
-
-# TODO(coarse_discourse): BibTeX citation
-_CITATION = """\
-@inproceedings{coarsediscourse, title={Characterizing Online Discussion Using Coarse Discourse Sequences}, author={Zhang, Amy X. and Culbertson, Bryan and Paritosh, Praveen}, booktitle={Proceedings of the 11th International AAAI Conference on Weblogs and Social Media}, series={ICWSM '17}, year={2017}, location = {Montreal, Canada} }
-"""
-
-# TODO(coarse_discourse):
-_DESCRIPTION = """\
-Discourse annotations and relations on Reddit threads collected during 2016.
-"""
-# From: https://github.com/google-research-datasets/coarse-discourse
-_URL = "https://raw.githubusercontent.com/google-research-datasets/coarse-discourse/master/coarse_discourse_dataset.json"
-
-
-class CoarseDiscourse(datasets.GeneratorBasedBuilder):
-    """TODO(coarse_discourse): Short description of my dataset."""
-
-    # TODO(coarse_discourse): Set up version.
-    VERSION = datasets.Version("0.1.0")
-
-    def _info(self):
-        # TODO(coarse_discourse): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    # These are the features of your dataset like images, labels ...
-                    "title": datasets.Value("string"),
-                    "is_self_post": datasets.Value("bool"),
-                    "subreddit": datasets.Value("string"),
-                    "url": datasets.Value("string"),
-                    "majority_link": datasets.Value("string"),
-                    "is_first_post": datasets.Value("bool"),
-                    "majority_type": datasets.Value("string"),
-                    "id_post": datasets.Value("string"),
-                    "post_depth": datasets.Value("int32"),
-                    "in_reply_to": datasets.Value("string"),
-                    "annotations": datasets.features.Sequence(
-                        {
-                            "annotator": datasets.Value("string"),
-                            "link_to_post": datasets.Value("string"),
-                            "main_type": datasets.Value("string"),
-                        }
-                    ),
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://github.com/google-research-datasets/coarse-discourse",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(coarse_discourse): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        data_path = dl_manager.download(_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": data_path,
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        # TODO(coarse_discourse): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            for id_, row in enumerate(f):
-                data = json.loads(row)
-                url = data.get("url", "")
-                is_self_post = data.get("is_self_post", "")
-                subreddit = data.get("subreddit", "")
-                title = data.get("title", "")
-                posts = data.get("posts", [])
-                for id1, post in enumerate(posts):
-                    maj_link = post.get("majority_link", "")
-                    maj_type = post.get("majority_type", "")
-                    id_post = post.get("id", "")
-                    is_first_post = post.get("is_first_post", "")
-                    post_depth = post.get("post_depth", -1)
-                    in_reply_to = post.get("in_reply_to", "")
-                    annotations = post["annotations"]
-                    annotators = [annotation.get("annotator", "") for annotation in annotations]
-                    main_types = [annotation.get("main_type", "") for annotation in annotations]
-                    link_posts = [annotation.get("link_to_post", "") for annotation in annotations]
-
-                    yield str(id_) + "_" + str(id1), {
-                        "title": title,
-                        "is_self_post": is_self_post,
-                        "subreddit": subreddit,
-                        "url": url,
-                        "majority_link": maj_link,
-                        "is_first_post": is_first_post,
-                        "majority_type": maj_type,
-                        "id_post": id_post,
-                        "post_depth": post_depth,
-                        "in_reply_to": in_reply_to,
-                        "annotations": {"annotator": annotators, "link_to_post": link_posts, "main_type": main_types},
-                    }
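With the loading script removed, the shard added below can also be read directly, without `datasets` at all. A minimal sketch, assuming a local checkout of the repository and pandas with a Parquet engine installed:

```python
import pandas as pd

# Read the single train shard added in this commit.
df = pd.read_parquet("data/train-00000-of-00001.parquet")

assert len(df) == 116357    # matches num_examples in the README above
print(df.columns.tolist())  # title, is_self_post, subreddit, url, ...
```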
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03b0e10b437d8578601a698afccbbf1554a856082300385a8a0f3b59132faf5b
+size 4256575
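The repository itself only tracks the Git LFS pointer above; the actual Parquet bytes live in LFS storage. A quick sketch for checking a downloaded copy against the pointer's `oid` and `size`:

```python
import hashlib
from pathlib import Path

data = Path("data/train-00000-of-00001.parquet").read_bytes()

# The pointer records the blob's byte size and SHA-256 digest.
assert len(data) == 4256575
expected = "03b0e10b437d8578601a698afccbbf1554a856082300385a8a0f3b59132faf5b"
assert hashlib.sha256(data).hexdigest() == expected
```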