Sam committed on
Commit
b73b0e5
1 Parent(s): f4350a3

Update from sam

Browse files
Files changed (1) hide show
  1. argument_mining_dataloader.py +132 -0
argument_mining_dataloader.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """
18
+ Arguement Mining Dataset created by Stab , Gurevych et. al. CL 2017
19
+ """
20
+
21
+ import datasets
22
+
23
+
24
+ logger = datasets.logging.get_logger(__name__)
25
+
26
+
27
+ _CITATION = """\
28
+ @article{stab2017parsing,
29
+ title={Parsing argumentation structures in persuasive essays},
30
+ author={Stab, Christian and Gurevych, Iryna},
31
+ journal={Computational Linguistics},
32
+ volume={43},
33
+ number={3},
34
+ pages={619--659},
35
+ year={2017},
36
+ publisher={MIT Press One Rogers Street, Cambridge, MA 02142-1209, USA journals-info~…}
37
+ }
38
+ """
39
+
40
+ _DESCRIPTION = """\
41
+ tokens along with chunk id. Begining of arguement denoted by Arg_B,inside arguement
42
+ denoted by Arg_I, other chunks are O
43
+ Orginial train,test split as used by the paper is provided
44
+ """
45
+
46
+ _URL = "https://raw.githubusercontent.com/Sam131112/Argument-Mining-Dataset/main/"
47
+ _TRAINING_FILE = "train.txt"
48
+ _TEST_FILE = "test.txt"
49
+
50
+
51
class ArguementMiningCL2017Config(datasets.BuilderConfig):
    """BuilderConfig for the CL2017 argument mining dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for CL2017.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # Bug fix: the original called super(Conll2003Config, self) — a name
        # that is never defined in this module — raising NameError as soon as
        # the config was instantiated. Use this class's own name instead.
        super(ArguementMiningCL2017Config, self).__init__(**kwargs)
60
+
61
+
62
class Conll2003(datasets.GeneratorBasedBuilder):
    """Argument mining dataset builder (Stab & Gurevych, CL 2017).

    NOTE(review): the class name `Conll2003` is inherited from the template
    this loader was copied from; it is kept unchanged so any external code
    importing it keeps working.
    """

    BUILDER_CONFIGS = [
        # Bug fix: the original instantiated the undefined name
        # `Conll2003Config`; the config class defined in this module is
        # ArguementMiningCL2017Config.
        ArguementMiningCL2017Config(
            name="cl2017", version=datasets.Version("1.0.0"), description="Cl2017 dataset"
        ),
    ]

    def _info(self):
        """Declare the dataset schema.

        Bug fix: `_generate_examples` yields `id` and `tokens` alongside
        `chunk_tags`, but the original schema declared only `chunk_tags`,
        so encoding examples against the declared features fails. All
        three fields are declared here.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "chunk_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "Arg_B",
                                "Arg_I",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://direct.mit.edu/coli/article/43/3/619/1573/Parsing-Argumentation-Structures-in-Persuasive",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the original train/test files and return SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs from a tab-separated token/tag file.

        Sentences are separated by blank lines; each non-blank line is
        `token<TAB>tag`.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            chunk_tags = []
            # (removed the unused `pos_tags` / `ner_tags` accumulators the
            # template left behind — this dataset has only chunk tags)
            for line in f:
                if line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "chunk_tags": chunk_tags,
                        }
                        guid += 1
                        tokens = []
                        chunk_tags = []
                else:
                    # tokens are tab separated: token<TAB>tag
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    # Bug fix: strip the trailing newline from the tag column;
                    # the original appended e.g. "Arg_B\n", which the ClassLabel
                    # feature cannot encode.
                    chunk_tags.append(splits[1].rstrip())
            # Last example: only emit if the file did not already end with a
            # blank line (otherwise the original yielded a spurious empty example).
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "chunk_tags": chunk_tags,
                }