Languages: English
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: found
Annotations Creators: crowdsourced
Source Datasets: original
system (HF staff) committed
Commit c2bf09b
1 parent: c61e33f

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
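Among other changes, datasets 1.16.0 reworked many dataset scripts to read downloaded archives without extracting them first, which is what the movie_rationales.py diff below applies via dl_manager.iter_archive. As a minimal sketch of what that enables (hypothetical usage, not part of this commit; assumes datasets >= 1.16.0 is installed):

from datasets import load_dataset

# Stream examples straight from the source archive instead of
# extracting it to disk first.
stream = load_dataset("movie_rationales", split="train", streaming=True)
first = next(iter(stream))
print(first["label"], first["review"][:80])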

Files changed (3):
  1. README.md (+1, -0)
  2. dummy/0.1.0/dummy_data.zip (+0, -3)
  3. movie_rationales.py (+40, -27)
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: MovieRationales
 languages:
 - en
 paperswithcode_id: null
dummy/0.1.0/dummy_data.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7fd146b11e415369fbca2c0d51fdb5e0b459516b7c2712bf717d39859a2547c3
-size 2390
movie_rationales.py CHANGED
@@ -18,7 +18,6 @@
 
 
 import json
-import os
 
 import datasets
 
@@ -49,6 +48,7 @@ class MovieRationales(datasets.GeneratorBasedBuilder):
     """Movie reviews with human annotated rationales."""
 
     VERSION = datasets.Version("0.1.0")
+    test_dummy_data = False  # dummy data don't support having a specific order for the files in the archive
 
     def _info(self):
         return datasets.DatasetInfo(
@@ -67,43 +67,56 @@ class MovieRationales(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        dl_dir = dl_manager.download_and_extract(_DOWNLOAD_URL)
-        data_dir = os.path.join(dl_dir, "movies")
+        archive = dl_manager.download(_DOWNLOAD_URL)
+        data_dir = "movies/"
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "train.jsonl")},
+                gen_kwargs={
+                    "reviews_dir": data_dir + "docs",
+                    "filepath": data_dir + "train.jsonl",
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "val.jsonl")},
+                gen_kwargs={
+                    "reviews_dir": data_dir + "docs",
+                    "filepath": data_dir + "val.jsonl",
+                    "files": dl_manager.iter_archive(archive),
+                },
            ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "test.jsonl")},
+                gen_kwargs={
+                    "reviews_dir": data_dir + "docs",
+                    "filepath": data_dir + "test.jsonl",
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
         ]
 
-    def _generate_examples(self, data_dir, filepath):
+    def _generate_examples(self, reviews_dir, filepath, files):
         """Yields examples."""
-        reviews_dir = os.path.join(data_dir, "docs")
-
-        with open(filepath, encoding="utf-8") as f:
-            for line in f:
-                row = json.loads(line)
-                doc_id = row["annotation_id"]
-                review_file = os.path.join(reviews_dir, doc_id)
-                with open(review_file, encoding="utf-8") as f1:
-                    review_text = f1.read()
-
-                evidences = []
-                for evidence in row["evidences"]:
-                    for e in evidence:
-                        evidences.append(e["text"])
-
-                yield doc_id, {
-                    "review": review_text,
-                    "label": row["classification"],
-                    "evidences": evidences,
-                }
+        reviews = {}
+        for path, f in files:
+            if path.startswith(reviews_dir):
+                reviews[path.split("/")[-1]] = f.read().decode("utf-8")
+            elif path == filepath:
+                for line in f:
+                    row = json.loads(line.decode("utf-8"))
+                    doc_id = row["annotation_id"]
+                    review_text = reviews[doc_id]
+
+                    evidences = []
+                    for evidence in row["evidences"]:
+                        for e in evidence:
+                            evidences.append(e["text"])
+
+                    yield doc_id, {
+                        "review": review_text,
+                        "label": row["classification"],
+                        "evidences": evidences,
+                    }
+                break
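The rewritten _generate_examples makes a single pass over the downloaded archive: dl_manager.iter_archive yields (path, file object) pairs in archive order, the review documents under docs/ are cached in the reviews dict, and the loop breaks once the requested split's .jsonl file has been consumed. This single pass only works if the docs/ entries precede the split files inside the archive, which is the ordering assumption behind test_dummy_data = False above. A rough, illustration-only equivalent of iter_archive using the standard library (assumes a local tar copy of the archive named movies.tar.gz; the real helper is provided by the datasets library):

import tarfile

def iter_archive(archive_path):
    # Yield (path, file-like object) pairs in archive order,
    # mimicking dl_manager.iter_archive for a local tar file.
    with tarfile.open(archive_path) as tar:
        for member in tar:
            if member.isfile():
                yield member.name, tar.extractfile(member)

for path, f in iter_archive("movies.tar.gz"):
    print(path)  # e.g. "movies/docs/<doc_id>" or "movies/train.jsonl"
    break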