ruanchaves committed on
Commit c2ac41e
1 Parent(s): e26150c

Update stan_large.py

Files changed (1)
  1. stan_large.py +4 -17
stan_large.py CHANGED
@@ -37,9 +37,9 @@ which refers to the “Lionhead” video game company, was labeled as “lion he
 We therefore constructed the STAN large dataset of 12,594 hashtags with additional quality control for human annotations."
 """
 _URLS = {
-    "train": "https://github.com/prashantkodali/HashSet/raw/master/datasets/stan-large-maddela_et_al_train.pkl",
-    "dev": "https://github.com/prashantkodali/HashSet/raw/master/datasets/stan-large-maddela_et_al_dev.pkl",
-    "test": "https://github.com/prashantkodali/HashSet/raw/master/datasets/stan-large-maddela_et_al_test.pkl"
+    "train": "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/stan_large_train.csv",
+    "dev": "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/stan_large_dev.csv",
+    "test": "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/stan_large_test.csv"
 }
 
 class StanLarge(datasets.GeneratorBasedBuilder):
@@ -100,20 +100,7 @@ class StanLarge(datasets.GeneratorBasedBuilder):
             alts = [{"segmentation": x} for x in alts]
             return alts
 
-        with open(filepath, 'rb') as f:
-            try:
-                import pickle
-                records = pickle.load(f)
-            except ValueError:
-                try:
-                    import pickle5 as pickle
-                    records = pickle.load(f)
-                except ModuleNotFoundError:
-                    raise ImportError(
-                        """To be able to use stan_large, you need to install the following dependencies['pickle5']
-                        using 'pip install pickle5' for instance"""
-                    )
-            records = records.to_dict("records")
+        records = pd.read_csv(filepath).to_dict("records")
         for idx, row in enumerate(records):
            segmentation = get_segmentation(row)
            alternatives = get_alternatives(row, segmentation)
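
For context, a minimal sketch of the CSV-based load path this commit swaps in for the pickle/pickle5 logic. It assumes the script imports pandas as pd elsewhere (the import is not shown in the diff) and that filepath points at one of the downloaded stan_large_{train,dev,test}.csv splits from _URLS; the helper name load_records is hypothetical, used only to show the call in isolation.

import pandas as pd

def load_records(filepath):
    # Read one CSV split and turn each row into a dict, matching the record
    # format the old pickle branch produced via DataFrame.to_dict("records").
    return pd.read_csv(filepath).to_dict("records")

Because the CSVs are plain text, this removes the dependency on pickle5 for older Python versions that the previous error message pointed users to.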