multi_lexsum.py
import json
from typing import Dict

import datasets
from datasets.tasks import Summarization

logger = datasets.logging.get_logger(__name__)
def _load_jsonl(filename):
    """Load a JSON Lines file: one JSON object per line."""
    with open(filename, "r") as fp:
        jsonl_content = fp.read()
    return [json.loads(jline) for jline in jsonl_content.splitlines()]


def _load_json(filepath):
    """Load a regular JSON file."""
    with open(filepath, "r") as fp:
        return json.load(fp)
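
# The train/dev/test release files are JSON Lines (one case record per line,
# despite the .json extension), while sources.json is a single JSON object
# mapping each document id to its text and metadata (see _generate_examples).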
_CITATION = """
@article{Shen2022MultiLexSum,
  author  = {Zejiang Shen and
             Kyle Lo and
             Lauren Yu and
             Nathan Dahlberg and
             Margo Schlanger and
             Doug Downey},
  title   = {Multi-LexSum: Real-World Summaries of Civil Rights Lawsuits at Multiple Granularities},
  journal = {CoRR},
  volume  = {abs/2206.10883},
  year    = {2022},
  url     = {https://doi.org/10.48550/arXiv.2206.10883},
  doi     = {10.48550/arXiv.2206.10883}
}
"""  # TODO
_DESCRIPTION = """
Multi-LexSum is a multi-document summarization dataset of civil rights litigation lawsuits, with summaries at three granularities.
"""  # TODO: Update with full abstract
_HOMEPAGE = "https://multilexsum.github.io"

# _BASE_URL = "https://ai2-s2-research.s3.us-west-2.amazonaws.com/multilexsum/releases"
_BASE_URL = "https://huggingface.co/datasets/allenai/multi_lexsum/resolve/main/releases"

_FILES = {
    "train": "train.json",
    "dev": "dev.json",
    "test": "test.json",
    "sources": "sources.json",
}
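
# Each file is resolved per config as f"{_BASE_URL}/{config_name}/{filename}",
# e.g. ".../releases/v20230518/train.json" (see _split_generators below).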
class MultiLexsumConfig(datasets.BuilderConfig):
    """BuilderConfig for Multi-LexSum."""

    def __init__(self, **kwargs):
        """BuilderConfig for Multi-LexSum.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(MultiLexsumConfig, self).__init__(**kwargs)
class MultiLexsum(datasets.GeneratorBasedBuilder):
    """Multi-LexSum: a multi-document summarization dataset for
    civil rights litigation lawsuits, with summaries of three granularities.
    """

    BUILDER_CONFIGS = [
        MultiLexsumConfig(
            name="v20220616",
            version=datasets.Version("1.0.0", "Public v1.0 release."),
            description="The v1.0 Multi-LexSum dataset",
        ),
        MultiLexsumConfig(
            name="v20230518",
            version=datasets.Version("1.1.0", "Public v1.1 release."),
            description="The v1.1 Multi-LexSum dataset, adding extra metadata for documents and cases",
        ),
    ]
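
    # A release is selected at load time via the `name` argument of
    # datasets.load_dataset(); see the usage sketch at the end of this file.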
    def _info(self):
        if self.config.name == "v20220616":
            return datasets.DatasetInfo(
                description=_DESCRIPTION,
                features=datasets.Features(
                    {
                        "id": datasets.Value("string"),
                        "sources": datasets.Sequence(datasets.Value("string")),
                        "summary/long": datasets.Value("string"),
                        "summary/short": datasets.Value("string"),
                        "summary/tiny": datasets.Value("string"),
                    }
                ),
                supervised_keys=None,
                homepage=_HOMEPAGE,
                citation=_CITATION,
                task_templates=[
                    Summarization(text_column="sources", summary_column="summary/long")
                ],
            )
        elif self.config.name == "v20230518":
            return datasets.DatasetInfo(
                description=_DESCRIPTION,
                features=datasets.Features(
                    {
                        "id": datasets.Value("string"),
                        "sources": datasets.Sequence(datasets.Value("string")),
                        "sources_metadata": datasets.Sequence(
                            {
                                "doc_id": datasets.Value("string"),
                                "doc_type": datasets.Value("string"),
                                "doc_title": datasets.Value("string"),
                                "parser": datasets.Value("string"),
                                "is_ocr": datasets.Value("bool"),
                                "url": datasets.Value("string"),
                            }
                        ),
                        "summary/long": datasets.Value("string"),
                        "summary/short": datasets.Value("string"),
                        "summary/tiny": datasets.Value("string"),
                        "case_metadata": datasets.Features(
                            {
                                # fmt: off
                                "case_name": datasets.Value("string"),
                                "case_type": datasets.Value("string"),
                                "filing_date": datasets.Value("string"),
                                "filing_year": datasets.Value("string"),
                                "case_ongoing": datasets.Value("string"),
                                "case_ongoing_record_time": datasets.Value("string"),
                                "closing_year": datasets.Value("string"),
                                "order_start_year": datasets.Value("string"),
                                "order_end_year": datasets.Value("string"),
                                "defendant_payment": datasets.Value("string"),
                                "class_action_sought": datasets.Value("string"),
                                "class_action_granted": datasets.Value("string"),
                                "attorney_orgs": [datasets.Value("string")],
                                "prevailing_party": datasets.Value("string"),
                                "plaintiff_types": [datasets.Value("string")],
                                "plaintiff_description": datasets.Value("string"),
                                "constitutional_clauses": [datasets.Value("string")],
                                "causes_of_action": [datasets.Value("string")],
                                "summary_authors": [datasets.Value("string")],
                                "case_url": datasets.Value("string"),
                                # fmt: on
                            }
                        ),
                    }
                ),
                supervised_keys=None,
                homepage=_HOMEPAGE,
                citation=_CITATION,
                task_templates=[
                    Summarization(text_column="sources", summary_column="summary/long")
                ],
            )
    def _split_generators(self, dl_manager):
        # `data_dir` can point at a local or alternative copy of the release
        # files; otherwise the official release URL is used.
        base_url = _BASE_URL if self.config.data_dir is None else self.config.data_dir
        downloaded_files = dl_manager.download_and_extract(
            {
                name: f"{base_url}/{self.config.name}/{filename}"
                for name, filename in _FILES.items()
            }
        )

        # sources.json is a large file shared by all splits, so read it once.
        sources = _load_json(downloaded_files["sources"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "subset_file": downloaded_files["train"],
                    "sources": sources,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "subset_file": downloaded_files["dev"],
                    "sources": sources,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "subset_file": downloaded_files["test"],
                    "sources": sources,
                },
            ),
        ]
    def _generate_examples(self, subset_file: str, sources: Dict[str, Dict]):
        """Yield the examples of one split in raw (text) form."""
        logger.info(f"generating examples from = {subset_file}")

        if self.config.name == "v20220616":
            subset_cases = _load_jsonl(subset_file)
            for case_data in subset_cases:
                case_sources = [
                    sources[source_id]["doc_text"]
                    for source_id in case_data["case_documents"]
                ]
                yield case_data["case_id"], {
                    "id": case_data["case_id"],
                    "sources": case_sources,
                    "summary/long": case_data["summary/long"],
                    "summary/short": case_data["summary/short"],
                    "summary/tiny": case_data["summary/tiny"],
                }

        elif self.config.name == "v20230518":
            subset_cases = _load_jsonl(subset_file)
            for case_data in subset_cases:
                case_sources = [
                    sources[source_id]["doc_text"]
                    for source_id in case_data["case_documents"]
                ]
                # Everything in a source record except the text itself is
                # surfaced as per-document metadata.
                case_source_metadata = [
                    {
                        key: val
                        for key, val in sources[source_id].items()
                        if key != "doc_text"
                    }
                    for source_id in case_data["case_documents"]
                ]
                case_metadata = {
                    "case_name": case_data["case_name"],
                    "case_type": case_data["case_type"],
                    "filing_date": case_data["filing_date"],
                    "filing_year": case_data["filing_year"],
                    "case_ongoing": case_data["case_ongoing"],
                    "case_ongoing_record_time": case_data["case_ongoing_record_time"],
                    "closing_year": case_data["closing_year"],
                    "order_start_year": case_data["order_start_year"],
                    "order_end_year": case_data["order_end_year"],
                    "defendant_payment": case_data["defendant_payment"],
                    "class_action_sought": case_data["class_action_sought"],
                    "class_action_granted": case_data["class_action_granted"],
                    "attorney_orgs": case_data["attorney_org"],
                    "prevailing_party": case_data["prevailing_party"],
                    "plaintiff_types": case_data["plaintiff_types"],
                    "plaintiff_description": case_data["plaintiff_description"],
                    "constitutional_clauses": case_data["constitutional_clauses"],
                    "causes_of_action": case_data["causes_of_action"],
                    "summary_authors": case_data["summary_authors"],
                    "case_url": case_data["case_url"],
                }
                yield case_data["case_id"], {
                    "id": case_data["case_id"],
                    "sources": case_sources,
                    "sources_metadata": case_source_metadata,
                    "summary/long": case_data["summary/long"],
                    "summary/short": case_data["summary/short"],
                    "summary/tiny": case_data["summary/tiny"],
                    "case_metadata": case_metadata,
                }
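

# ---------------------------------------------------------------------------
# Usage sketch (an illustration, not part of the builder): loading the dataset
# through the standard `datasets` API. The repo id "allenai/multi_lexsum" is
# taken from _BASE_URL above; newer versions of `datasets` may additionally
# require trust_remote_code=True for script-based datasets like this one.
if __name__ == "__main__":
    from datasets import load_dataset

    # Pick a release via the config name: "v20220616" or "v20230518".
    multi_lexsum = load_dataset("allenai/multi_lexsum", name="v20230518")

    example = multi_lexsum["validation"][0]   # splits: train / validation / test
    print(example["summary/tiny"])            # the shortest summary granularity
    print(len(example["sources"]))            # number of source documents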