Languages: English
Multilinguality: monolingual
Size Categories: unknown
Language Creators: machine-generated
Annotations Creators: expert-generated
Source Datasets: original
ArXiv: 1501.03210
Tags: word-segmentation
License:
Commit a85b37c by ruanchaves (parent: 1560080)

Upload test_stanford.py

Files changed (1):
  1. test_stanford.py +123 -0
test_stanford.py ADDED
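A note on the raw file format: judging from the parser below (tab-separated, four columns, one header row skipped, single quotes stripped), Test-Stanford.txt lists one candidate segmentation per row, with consecutive rows sharing a hashtag and a binary label marking the gold segmentation. The values in this sketch are illustrative, not taken from the actual file:

    id	hashtag	candidate	label
    '0'	'iloveny'	'i love ny'	1
    '0'	'iloveny'	'il oveny'	0
    '1'	'songsincode'	'songs in code'	1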
"""Test-Stanford hashtag segmentation dataset by Bansal et al."""

import datasets
import pandas as pd

_CITATION = """
@misc{bansal2015deep,
      title={Towards Deep Semantic Analysis Of Hashtags},
      author={Piyush Bansal and Romil Bansal and Vasudeva Varma},
      year={2015},
      eprint={1501.03210},
      archivePrefix={arXiv},
      primaryClass={cs.IR}
}
"""

_DESCRIPTION = """
Manually annotated Stanford sentiment analysis dataset by Bansal et al.
"""

_URLS = {
    "test": "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/Test-Stanford.txt"
}


class TestStanford(datasets.GeneratorBasedBuilder):
    """Each example pairs a hashtag with its ranked candidate segmentations."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "hashtag": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                    "gold_position": datasets.Value("int32"),
                    "rank": datasets.Sequence(
                        {
                            "position": datasets.Value("int32"),
                            "candidate": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        names = ["id", "hashtag", "candidate", "label"]
        df = pd.read_csv(filepath, sep="\t", skiprows=1, header=None, names=names)

        # Strip surrounding single quotes and whitespace from every column
        # except the numeric label.
        for col in names[:-1]:
            df[col] = df[col].astype(str).str.strip("'").str.strip()

        records = df.to_dict("records")

        # Group consecutive rows that share a hashtag: each group collects the
        # hashtag's candidate segmentations and their binary gold labels.
        output = []
        current_hashtag = None
        candidates = []
        ids = None
        label = []

        for row in records:
            if current_hashtag != row["hashtag"]:
                if current_hashtag is not None:
                    output.append({
                        "hashtag": current_hashtag,
                        "candidate": candidates,
                        "id": ids,
                        "label": label,
                    })
                current_hashtag = row["hashtag"]
                candidates = [row["candidate"]]
                ids = int(row["id"])
                label = [int(row["label"])]
            else:
                candidates.append(row["candidate"])
                label.append(int(row["label"]))

        # Flush the final group, which the loop above never appends.
        if current_hashtag is not None:
            output.append({
                "hashtag": current_hashtag,
                "candidate": candidates,
                "id": ids,
                "label": label,
            })

        def get_gold_position(row):
            # Position of the gold segmentation among the candidates, or None
            # if no candidate is labeled 1.
            try:
                return row["label"].index(1)
            except ValueError:
                return None

        def get_rank(row):
            # 1-based ranking over the candidate segmentations.
            return [
                {"position": idx + 1, "candidate": item}
                for idx, item in enumerate(row["candidate"])
            ]

        def get_segmentation(row):
            # The candidate labeled 1 is the gold segmentation, if any.
            try:
                return row["candidate"][row["label"].index(1)]
            except ValueError:
                return None

        for idx, row in enumerate(output):
            yield idx, {
                "index": int(row["id"]),
                "hashtag": row["hashtag"],
                "segmentation": get_segmentation(row),
                "gold_position": get_gold_position(row),
                "rank": get_rank(row),
            }
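For reference, a minimal sketch of how the generated dataset would be consumed once the script is on the Hub; the repository id "ruanchaves/test_stanford" is an assumption based on the committer's namespace, not something this commit confirms:

    from datasets import load_dataset

    # Hypothetical Hub repository id; point this at wherever the script is hosted.
    dataset = load_dataset("ruanchaves/test_stanford", split="test")

    example = dataset[0]
    print(example["hashtag"])        # the unsegmented hashtag string
    print(example["segmentation"])   # gold segmentation, or None if no candidate is labeled 1
    print(example["gold_position"])  # 0-based position of the gold candidate, or None
    print(example["rank"])           # Sequence of dicts -> {"position": [...], "candidate": [...]}

Note the last line: because "rank" is declared as datasets.Sequence over a dict of features, the library exposes it as a dict of lists rather than a list of dicts.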