ruanchaves committed
Commit b261299
1 Parent(s): 645963b

Upload stan_small.py

Files changed (1)
stan_small.py +76 -0
stan_small.py ADDED
@@ -0,0 +1,76 @@
"""STAN small dataset by Bansal et al."""

import pickle

import datasets
import pandas as pd  # noqa: F401 -- required to unpickle the DataFrame stored in the .pkl file

_CITATION = """
@misc{bansal2015deep,
    title={Towards Deep Semantic Analysis Of Hashtags},
    author={Piyush Bansal and Romil Bansal and Vasudeva Varma},
    year={2015},
    eprint={1501.03210},
    archivePrefix={arXiv},
    primaryClass={cs.IR}
}
"""

_DESCRIPTION = """
Manually Annotated Stanford Sentiment Analysis Dataset by Bansal et al.
"""

_URLS = {
    "test": "https://github.com/prashantkodali/HashSet/raw/master/datasets/stan-small-bansal_et_al.pkl"
}


class StanSmall(datasets.GeneratorBasedBuilder):
    """Hashtag segmentation dataset: one canonical gold segmentation plus alternatives per hashtag."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "hashtag": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                    # Remaining gold segmentations beyond the first one.
                    "alternatives": datasets.Sequence(
                        {"segmentation": datasets.Value("string")}
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/mounicam/hashtag_master",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)
        # The dataset ships as a single pickled file, exposed here as the test split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):

        def get_segmentation(row):
            # The first gold truth is taken as the canonical segmentation.
            return row["goldtruths"][0]

        def get_alternatives(row):
            # All remaining gold truths become alternative segmentations.
            segmentations = [{"segmentation": x} for x in row["goldtruths"]]
            return segmentations[1:]

        # The pickle holds a pandas DataFrame with "hashtags" and "goldtruths" columns.
        with open(filepath, "rb") as f:
            records = pickle.load(f)
        records = records.to_dict("records")
        for idx, row in enumerate(records):
            yield idx, {
                "index": idx,
                "hashtag": row["hashtags"],
                "segmentation": get_segmentation(row),
                "alternatives": get_alternatives(row),
            }
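
For reference, a minimal usage sketch. It assumes the script above is saved locally as ./stan_small.py and that a version of the datasets library that still supports script-based loading is installed; the field names follow the Features declared in _info. Depending on the datasets version, trust_remote_code=True may also be required.

# Minimal usage sketch (assumption: script saved as ./stan_small.py,
# datasets version with script-based loading support).
import datasets

dataset = datasets.load_dataset("./stan_small.py", split="test")

# Inspect the first example.
example = dataset[0]
print(example["hashtag"])        # the raw hashtag string
print(example["segmentation"])   # canonical (first) gold segmentation
print(example["alternatives"])   # remaining gold segmentations, if any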