Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
leonardorigutini committed
Commit 11f9f3a
1 Parent(s): d16d47a

Delete buster.py

Files changed (1):
  buster.py +0 -115
buster.py DELETED
@@ -1,115 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """BUSTER: a BUSiness Transaction Entity Recognition Dataset"""
-
- import os
- import datasets
- from datasets import load_dataset
-
- _CITATION = """
- Accepted at EMNLP 2023 - Industry Track.
- TBA
- """
-
- _DESCRIPTION = """
- BUSTER is an Entity Recognition dataset consisting of 3779 manually annotated documents on financial transactions.
- Documents were selected using EDGAR (Electronic Data Gathering, Analysis, and Retrieval system) from the
- U.S. Securities and Exchange Commission (SEC).
- The corpus focuses on the main actors involved in business transactions.
- Overall, there are three families of entities: Parties, Advisors and Generic information, for a total of 6 annotated
- entity types.
- We also released a corpus of 6196 automatically annotated documents.
- """
-
- _HOMEPAGE = "https://expert.ai/buster"
- _URL = "buster.zip"
- _VERSION = "1.0.0"
-
- logger = datasets.logging.get_logger(__name__)
-
-
- # --------------------------------------------------------------------------------------------------------
- # Tag set
- _LABELS = [
-     "O",  # non-entities label
-     "B-Parties.BUYING_COMPANY",
-     "I-Parties.BUYING_COMPANY",
-     "B-Parties.SELLING_COMPANY",
-     "I-Parties.SELLING_COMPANY",
-     "B-Parties.ACQUIRED_COMPANY",
-     "I-Parties.ACQUIRED_COMPANY",
-     "B-Advisors.LEGAL_CONSULTING_COMPANY",
-     "I-Advisors.LEGAL_CONSULTING_COMPANY",
-     "B-Advisors.GENERIC_CONSULTING_COMPANY",
-     "I-Advisors.GENERIC_CONSULTING_COMPANY",
-     "B-Generic_Info.ANNUAL_REVENUES",
-     "I-Generic_Info.ANNUAL_REVENUES",
- ]
-
-
- class BusterConfig(datasets.BuilderConfig):
-     """BuilderConfig for the BUSTER dataset."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for the BUSTER dataset.
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(BusterConfig, self).__init__(
-             name="BUSTER",
-             description=_DESCRIPTION,
-             version=datasets.Version(_VERSION),  # hf dataset script version
-             **kwargs,
-         )
-
-
- class Buster(datasets.GeneratorBasedBuilder):
-     """The BUSTER dataset."""
-
-     BUILDER_CONFIGS = [
-         BusterConfig()
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "document_id": datasets.Value("string"),
-                     "tokens": datasets.Sequence(datasets.Value("string")),
-                     "labels": datasets.Sequence(datasets.features.ClassLabel(names=_LABELS)),
-                 }
-             ),
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         data_dir = dl_manager.download_and_extract(_URL)
-         fold_names = [f"FOLD_{i}" for i in range(5)] + ["SILVER"]
-         return [
-             datasets.SplitGenerator(
-                 name=fold_name,
-                 gen_kwargs={"file_path": os.path.join(data_dir, fold_name)},
-             ) for fold_name in fold_names
-         ]
-
-     def _generate_examples(self, file_path):
-         dataset = load_dataset("json", data_files=file_path)
-         dataset = dataset.remove_columns(["positions"])
-         logger.info(f"Generating examples from: {file_path}")
-         for idx, example in enumerate(dataset["train"]):
-             # example features: document_id, tokens, labels
-             yield idx, example
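
With the loading script removed, the dataset is served directly from the repository's data files (the page lists parquet under Formats), which the `datasets` library reads without any custom code. A minimal loading sketch, assuming the Hub repo id is `expert-ai/BUSTER` (the id is not shown on this page) and that the split names still match the deleted script:

```python
# Minimal loading sketch. Assumptions: the repo id is "expert-ai/BUSTER"
# and the splits mirror the deleted script (FOLD_0 ... FOLD_4 plus SILVER).
from datasets import load_dataset

ds = load_dataset("expert-ai/BUSTER")  # returns a DatasetDict, one entry per split
print(ds)  # shows the actual split names and row counts

example = ds["FOLD_0"][0]  # assumed split name; adjust to what print(ds) reports
print(example["document_id"], len(example["tokens"]))
```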
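Note that the deleted `_info` declared `labels` as a `Sequence` of `ClassLabel`, so each loaded example carries integer ids rather than tag strings; `ClassLabel.int2str` recovers the BIO tags. A sketch, continuing from the loading example above:

```python
# Recover BIO tag strings from the integer ClassLabel ids.
split = ds["FOLD_0"]                              # assumed split name, as above
label_feature = split.features["labels"].feature  # the ClassLabel inside the Sequence

example = split[0]
tags = label_feature.int2str(example["labels"])   # e.g. "O", "B-Parties.BUYING_COMPANY", ...
for token, tag in zip(example["tokens"][:10], tags[:10]):
    print(f"{token}\t{tag}")
```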
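Since the tag set follows the standard BIO scheme over the six entity types, entity mentions can be reconstructed by grouping each `B-` tag with the `I-` tags that continue it. A generic decoding sketch (not part of the deleted script):

```python
def bio_to_spans(tokens, tags):
    """Group BIO tags into (entity_type, surface_text) spans.

    A span opens at each "B-" tag and extends over consecutive "I-" tags
    of the same type; anything else closes the open span.
    """
    spans, open_type, open_tokens = [], None, []
    for token, tag in zip(tokens, tags):
        if tag.startswith("B-"):
            if open_type is not None:
                spans.append((open_type, " ".join(open_tokens)))
            open_type, open_tokens = tag[2:], [token]
        elif tag.startswith("I-") and open_type == tag[2:]:
            open_tokens.append(token)
        else:
            if open_type is not None:
                spans.append((open_type, " ".join(open_tokens)))
            open_type, open_tokens = None, []
    if open_type is not None:
        spans.append((open_type, " ".join(open_tokens)))
    return spans

# Usage: bio_to_spans(example["tokens"], tags)
# -> e.g. [("Parties.ACQUIRED_COMPANY", "Acme Corp"), ...]  (illustrative output)
```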