parquet-converter committed on
Commit f864ef3 · 1 Parent(s): 0d9e409

Update parquet files

README.md DELETED
@@ -1,5 +0,0 @@
- For details, please refer to the following links.
-
- Github repo: https://github.com/amazon-research/SC2QA-DRIL
-
- Paper: [Generating Self-Contained and Summary-Centric Question Answer Pairs via Differentiable Reward Imitation Learning](https://arxiv.org/pdf/2109.04689.pdf)
val.csv → plain_text/sc2q_commoncrawl_large-test.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:78b86c5e919f27a278d08c3ef5e00d3ffda767c371efbc0d07dfaf1ccb9ff6d3
- size 108953981
+ oid sha256:78c9414924cd40c3b8ea51ace4d8ef0fabb0736955a197f214360de03f6755e8
+ size 124490921
test.csv → plain_text/sc2q_commoncrawl_large-train-00000-of-00004.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c67fd9e0170d188a9a6bb5a7e15a56ae2347a4ae4d9683f8aefedad508993cc8
- size 212319294
+ oid sha256:0a127e09dbbaaefcad6b40f2af7bdc22582c050727544f1214af53f2780d5fbf
+ size 305887888
train.csv → plain_text/sc2q_commoncrawl_large-train-00001-of-00004.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:80624f565c91c646006a8d028dcc27021d5cbaa22e3d0be78f78be53d5815efb
- size 1886633620
+ oid sha256:992bf892111c9b72113e841a29bacdde2fd0fe4419b374c7dece8a32dd269605
+ size 303642833
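
Each RENAMED entry above only swaps the Git LFS pointer text: the oid is the SHA-256 of the file's contents and size is its byte count, while the parquet data itself lives in LFS storage. A minimal sketch for checking a downloaded shard against a pointer, assuming the shard was already saved locally (the file name below is hypothetical; the expected digest is copied from the test-split pointer above):

import hashlib

def lfs_oid(path: str) -> str:
    # SHA-256 of the raw file bytes; this is what Git LFS records as the oid.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local file name; the digest is the new test-split oid from the diff.
expected = "78c9414924cd40c3b8ea51ace4d8ef0fabb0736955a197f214360de03f6755e8"
print(lfs_oid("sc2q_commoncrawl_large-test.parquet") == expected)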
plain_text/sc2q_commoncrawl_large-train-00002-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b11ca8d6f877c20741e7ea483c24934be0b2ca32f51c2ac2323f2b5156af54e7
+ size 303434255
plain_text/sc2q_commoncrawl_large-train-00003-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3f9b2bad523bb1be8ed423fa9f8795e19632393f2b08ebaf660dbddd84835dd
+ size 215279088
plain_text/sc2q_commoncrawl_large-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37b68b81edf198f59e89a4f71910db740c4c4ec172a8b7cddb794b34addbac01
+ size 62867731
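
The ADDED entries above are likewise just LFS pointers; the shards themselves can be fetched from the dataset repo and read as ordinary parquet. A minimal sketch, assuming only the repo id and file paths shown in this diff, with huggingface_hub and pandas (plus a parquet engine such as pyarrow) installed:

import pandas as pd
from huggingface_hub import hf_hub_download

# Fetch one converted shard; the repo id and path are taken from the file names above.
path = hf_hub_download(
    repo_id="sc2qa/sc2q_commoncrawl_large",
    filename="plain_text/sc2q_commoncrawl_large-validation.parquet",
    repo_type="dataset",
)

df = pd.read_parquet(path)
print(df.shape, list(df.columns))  # columns expected: question, article, url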
sc2q_commoncrawl_large.py DELETED
@@ -1,99 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """(SC)^2QA: Self-Contained Summary-Centric QA Dataset.
- This dataset (https://huggingface.co/datasets/sc2qa/sc2q_commoncrawl_large) contains 529,039 question and article pairs.
- If you want {Question, Article, Summary, Length Constraint} 4-tuples, please load sc2qa_commoncrawl (https://huggingface.co/datasets/sc2qa/sc2qa_commoncrawl)
- """
-
- import csv
-
- import datasets
-
-
- logger = datasets.logging.get_logger(__name__)
-
-
- _CITATION = """\
- @article{zhou2021generating,
- author = {Li Zhou, Kevin Small, Yong Zhang, Sandeep Atluri},
- title = "{Generating Self-Contained and Summary-Centric Question Answer Pairs via Differentiable Reward Imitation Learning}",
- conference = {The 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP 2021)},
- year = 2021,
- }
- """
-
- _DESCRIPTION = """\
- """
-
- _URLS = {
-     "train": "https://huggingface.co/datasets/sc2qa/sc2q_commoncrawl_large/resolve/main/train.csv",
-     "val": "https://huggingface.co/datasets/sc2qa/sc2q_commoncrawl_large/resolve/main/val.csv",
-     "test": "https://huggingface.co/datasets/sc2qa/sc2q_commoncrawl_large/resolve/main/test.csv",
- }
-
- class SC2QAConfig(datasets.BuilderConfig):
-     """BuilderConfig for (SC)^2QA."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for (SC)^2QA.
-
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(SC2QAConfig, self).__init__(**kwargs)
-
-
- class SC2QA(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [
-         SC2QAConfig(
-             name="plain_text",
-             version=datasets.Version("1.0.0", ""),
-             description="Plain text",
-         ),
-     ]
-
-     def _info(self):
-         # Should return a datasets.DatasetInfo object
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "question": datasets.Value("string"),
-                     "article": datasets.Value("string"),
-                     "url": datasets.Value("string"),
-                 }
-             ),
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         downloaded_files = dl_manager.download_and_extract(_URLS)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-         ]
-
-     def _generate_examples(self, filepath):
-         """This function returns the examples in the raw (text) form."""
-         logger.info("generating examples from = %s", filepath)
-         key = 0
-         with open(filepath, encoding="ascii", errors='ignore') as f:
-             csv_reader = csv.DictReader(f)
-             for i, row in enumerate(csv_reader):
-                 yield i, row
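
With the loading script deleted and the CSVs replaced by the parquet shards above, the dataset can be loaded straight from the Hub's parquet files. A minimal sketch, assuming the "plain_text" config name and the train/validation/test splits shown in this commit:

from datasets import load_dataset

# No loading script is needed any more; datasets reads the parquet shards directly.
ds = load_dataset("sc2qa/sc2q_commoncrawl_large", "plain_text")

print(ds)               # DatasetDict with train/validation/test splits
print(ds["train"][0])   # {"question": ..., "article": ..., "url": ...}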