Datasets:

Languages:
English
Multilinguality:
monolingual
Size Categories:
1K<n<10K
10K<n<100K
Language Creators:
found
Annotations Creators:
expert-generated
Source Datasets:
original
ArXiv:
Tags:
License:
shannons committed on
Commit
efb73aa
1 Parent(s): 8dfe801

Add v20220616 release files

Browse files
.gitattributes CHANGED
@@ -39,3 +39,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
39
  *.mp3 filter=lfs diff=lfs merge=lfs -text
40
  *.ogg filter=lfs diff=lfs merge=lfs -text
41
  *.wav filter=lfs diff=lfs merge=lfs -text
 
 
39
  *.mp3 filter=lfs diff=lfs merge=lfs -text
40
  *.ogg filter=lfs diff=lfs merge=lfs -text
41
  *.wav filter=lfs diff=lfs merge=lfs -text
42
+ # MultiLexSum Specific
43
+ *.json filter=lfs diff=lfs merge=lfs -text
multi_lexsum.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Union, Dict, Any, Tuple
2
+ import json
3
+ import os
4
+
5
+ import datasets
6
+ from datasets.tasks import Summarization
7
+
8
+ logger = datasets.logging.get_logger(__name__)
9
+
10
+
11
+ def _load_jsonl(filename):
12
+ with open(filename, "r") as fp:
13
+ jsonl_content = fp.read()
14
+
15
+ result = [json.loads(jline) for jline in jsonl_content.splitlines()]
16
+ return result
17
+
18
+
19
+ def _load_json(filepath):
20
+
21
+ with open(filepath, "r") as fp:
22
+ res = json.load(fp)
23
+ return res
24
+
25
+
26
+ _CITATION = """
27
+ @article{Shen2022MultiLexSum,
28
+ author = {Zejiang Shen and
29
+ Kyle Lo and
30
+ Lauren Yu and
31
+ Nathan Dahlberg and
32
+ Margo Schlanger and
33
+ Doug Downey},
34
+ title = {Multi-LexSum: Real-World Summaries of Civil Rights Lawsuits at Multiple Granularities},
35
+ journal = {CoRR},
36
+ volume = {abs/2206.10883},
37
+ year = {2022},
38
+ url = {https://doi.org/10.48550/arXiv.2206.10883},
39
+ doi = {10.48550/arXiv.2206.10883}
40
+ }
41
+ """ # TODO
42
+
43
+ _DESCRIPTION = """
44
+ Multi-LexSum is a multi-doc summarization dataset for civil rights litigation lawsuits with summaries of three granularities.
45
+ """ # TODO: Update with full abstract
46
+
47
+ _HOMEPAGE = "https://multilexsum.github.io"
48
+
49
+ # _BASE_URL = "https://ai2-s2-research.s3.us-west-2.amazonaws.com/multilexsum/releases"
50
+ _BASE_URL = "https://huggingface.co/datasets/allenai/multi_lexsum/resolve/main/releases"
51
+ _FILES = {
52
+ "train": "train.json",
53
+ "dev": "dev.json",
54
+ "test": "test.json",
55
+ "sources": "sources.json",
56
+ }
57
+
58
+
59
class MultiLexsumConfig(datasets.BuilderConfig):
    """BuilderConfig for the Multi-LexSum dataset."""

    def __init__(self, **kwargs):
        """Create a LexSum config.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
68
+
69
+
70
class MultiLexsum(datasets.GeneratorBasedBuilder):
    """MultiLexSum Dataset: a multi-doc summarization dataset for
    civil rights litigation lawsuits with summaries of three granularities.
    """

    BUILDER_CONFIGS = [
        MultiLexsumConfig(
            name="v20220616",
            version=datasets.Version("1.0.0", "Public v1.0 release."),
            description="The v1.0 Multi-LexSum dataset",
        ),
    ]

    def _info(self):
        """Declare the dataset schema, homepage, citation and task template."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "sources": datasets.Sequence(datasets.Value("string")),
                    "summary/long": datasets.Value("string"),
                    "summary/short": datasets.Value("string"),
                    "summary/tiny": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[
                # FIX: the text column declared in `features` above is
                # "sources"; the original passed "source", which does not
                # exist in the schema.
                Summarization(text_column="sources", summary_column="summary/long")
            ],
        )

    def _split_generators(self, dl_manager):
        """Download the release files and build one generator per split.

        If ``data_dir`` is set on the config it overrides ``_BASE_URL``,
        so local mirrors of the release files can be used.
        """
        base_url = _BASE_URL if self.config.data_dir is None else self.config.data_dir
        # FIX: interpolate each split's filename into its URL; the original
        # f-string dropped `filename`, leaving every URL identical/invalid.
        downloaded_files = dl_manager.download_and_extract(
            {
                name: f"{base_url}/{self.config.name}/{filename}"
                for name, filename in _FILES.items()
            }
        )
        # sources.json is large (~2 GB per its LFS pointer), so parse it
        # once up front and share the dict across all three splits.
        sources = _load_json(downloaded_files["sources"])

        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "subset_file": downloaded_files[file_key],
                    "sources": sources,
                },
            )
            for split_name, file_key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "dev"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, subset_file: str, sources: Dict[str, Dict]):
        """Yield ``(key, example)`` pairs for one split, in raw text form.

        Args:
            subset_file: path to the split's JSONL file of case records.
            sources: mapping from source-document id to its record; the
                "doc_text" field holds the document's raw text.
        """
        logger.info(f"generating examples from = {subset_file}")

        subset_cases = _load_jsonl(subset_file)
        for case_data in subset_cases:
            # Resolve each case's document ids against the shared sources map.
            case_sources = [
                sources[source_id]["doc_text"]
                for source_id in case_data["case_documents"]
            ]
            yield case_data["case_id"], {
                "id": case_data["case_id"],
                "sources": case_sources,
                "summary/long": case_data["summary/long"],
                "summary/short": case_data["summary/short"],
                "summary/tiny": case_data["summary/tiny"],
            }
releases/v20220616/dev.json ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:00b5110633b1b3d5c33e6cc4645525b3633cbb1844fcdc63315f6b2b340fa958
3
+ size 2281645
releases/v20220616/sources.json ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d15a29d05ee4c8052270bf630633ec5b8c5ff7dab6cc480c009dc50f76c8ce24
3
+ size 2219115572
releases/v20220616/test.json ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:64fafb058f84cef09fc3d89df3ef913ee824d28f62bde9c1c9f52cc4d7c5b40a
3
+ size 4272330
releases/v20220616/train.json ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:edb45aee04a4aa1eebce2ec05880304322f29baf1b7bcf2c23409c020cae6aca
3
+ size 15711733