lewtun (HF staff) committed
Commit 02eb343
1 Parent(s): 5ffef31

Add UTF-8 encoding
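
A note on the change (general Python behavior, not spelled out in the commit message itself): when `encoding` is omitted, `open()` falls back to the platform's locale encoding, so non-ASCII text such as an emoji or an accented submission name may fail to round-trip through README.md on systems where that default is not UTF-8 (typically Windows). A minimal illustration, reusing the file name from the CLI below:

import locale

# The implicit default depends on the platform/locale (see PEP 597's EncodingWarning).
print(locale.getpreferredencoding(False))  # e.g. "cp1252" on many Windows setups

name = "GPT-4 😁"  # placeholder submission name containing non-ASCII text
with open("README.md", "w", encoding="utf-8") as f:  # explicit and portable
    f.write(f"submission_name: {name}\n")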

hooks/pre_gen_project.py CHANGED
@@ -14,4 +14,6 @@ repo_url = HfApi().create_repo(
     repo_type="dataset",
 )
 
-model_repo = Repository(local_dir=".", clone_from=repo_url, use_auth_token=huggingface_token)
+model_repo = Repository(
+    local_dir=".", clone_from=repo_url, use_auth_token=huggingface_token
+)
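
For context, a minimal sketch of how a `Repository` handle like the one created in this hook is typically used; only the constructor call appears in the diff, and the token and URL values below are placeholders:

from huggingface_hub import Repository

huggingface_token = "hf_..."  # placeholder user access token
repo_url = "https://huggingface.co/datasets/<user>/<repo>"  # placeholder; the hook gets this from create_repo()

# Clone the freshly created dataset repo into the current directory, as the hook does.
repo = Repository(
    local_dir=".", clone_from=repo_url, use_auth_token=huggingface_token
)

# Changes made later can be committed and pushed back to the Hub in one call.
repo.push_to_hub(commit_message="Initial commit")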
{{cookiecutter.repo_name}}/cli.py CHANGED
@@ -26,17 +26,19 @@ app = typer.Typer()
 
 def _update_submission_name(submission_name: str):
     replacement = ""
-    with open("README.md", "r") as f:
+    with open("README.md", "r", encoding="utf-8") as f:
         lines = f.readlines()
 
     for line in lines:
         if line.startswith("submission_name:"):
-            changes = re.sub(r"submission_name:.+", f"submission_name: {submission_name}", line)
+            changes = re.sub(
+                r"submission_name:.+", f"submission_name: {submission_name}", line
+            )
             replacement += changes
         else:
             replacement += line
 
-    with open("README.md", "w") as f:
+    with open("README.md", "w", encoding="utf-8") as f:
         f.write(replacement)
 
 
@@ -47,9 +49,13 @@ def validate():
 
     # Check that all the expected files exist
     prediction_files = list(Path("data").rglob("predictions.csv"))
-    mismatched_files = set(tasks).symmetric_difference(set([f.parent.name for f in prediction_files]))
+    mismatched_files = set(tasks).symmetric_difference(
+        set([f.parent.name for f in prediction_files])
+    )
     if mismatched_files:
-        raise ValueError(f"Incorrect number of files! Expected {len(tasks)} files, but got {len(prediction_files)}.")
+        raise ValueError(
+            f"Incorrect number of files! Expected {len(tasks)} files, but got {len(prediction_files)}."
+        )
 
     # Check all files have the expected shape (number of rows, number of columns)
     # TODO(lewtun): Add a check for the specific IDs per file
@@ -86,7 +92,11 @@ def validate():
 
 
 @app.command()
-def submit(submission_name: str = typer.Option(..., prompt="Please provide a name for your submission, e.g. GPT-4 😁")):
+def submit(
+    submission_name: str = typer.Option(
+        ..., prompt="Please provide a name for your submission, e.g. GPT-4 😁"
+    )
+):
     subprocess.call("git pull origin main".split())
     _update_submission_name(submission_name)
     subprocess.call(["git", "add", "data/*predictions.csv", "README.md"])
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}.py CHANGED
@@ -43,7 +43,9 @@ _LICENSE = ""
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 # This gets all folders within the directory named `data`
 # DATA_DIRS = next(os.walk('data'))[1]
-DATA_DIR_URL = "data/"  # "https://huggingface.co/datasets/ought/raft/resolve/main/data/"
+DATA_DIR_URL = (
+    "data/"  # "https://huggingface.co/datasets/ought/raft/resolve/main/data/"
+)
 # print([p for p in DATA_DIR_PATH.iterdir() if p.is_dir()])
 TASKS = {
     "ade_corpus_v2": {
@@ -163,7 +165,12 @@ TASKS = {
         "name": "neurips_impact_statement_risks",
         "description": "",
         "data_columns": ["Paper title", "Paper link", "Impact statement", "ID"],
-        "label_columns": {"Label": ["doesn't mention a harmful application", "mentions a harmful application"]},
+        "label_columns": {
+            "Label": [
+                "doesn't mention a harmful application",
+                "mentions a harmful application",
+            ]
+        },
     },
     "overruling": {
         "name": "overruling",
@@ -236,11 +243,11 @@ class RaftSubmission(datasets.GeneratorBasedBuilder):
         td = TASKS[key]
         name = td["name"]
         description = td["description"]
-        BUILDER_CONFIGS.append(datasets.BuilderConfig(name=name, version=VERSION, description=description))
+        BUILDER_CONFIGS.append(
+            datasets.BuilderConfig(name=name, version=VERSION, description=description)
+        )
 
-    DEFAULT_CONFIG_NAME = (
-        "tai_safety_research"  # It's not mandatory to have a default configuration. Just use one if it make sense.
-    )
+    DEFAULT_CONFIG_NAME = "tai_safety_research"  # It's not mandatory to have a default configuration. Just use one if it make sense.
 
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
@@ -286,18 +293,27 @@ class RaftSubmission(datasets.GeneratorBasedBuilder):
         dataset = self.config.name
         return [
             datasets.SplitGenerator(
-                name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir[dataset]["test"], "split": "test"}
+                name=datasets.Split.TEST,
+                gen_kwargs={"filepath": data_dir[dataset]["test"], "split": "test"},
             )
         ]
 
     def _generate_examples(
-        self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+        self,
+        filepath,
+        split,  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     ):
         """Yields examples as (key, example) tuples."""
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
         with open(filepath, encoding="utf-8") as f:
-            csv_reader = csv.reader(f, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True)
+            csv_reader = csv.reader(
+                f,
+                quotechar='"',
+                delimiter=",",
+                quoting=csv.QUOTE_ALL,
+                skipinitialspace=True,
+            )
             column_names = next(csv_reader)
             for id_, row in enumerate(csv_reader):
                 # dicts don't have inherent ordering in python, right??
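
Finally, a rough usage sketch for the loading script above; the path is a placeholder for wherever the generated repo lives, and the config name is one of the TASKS entries shown in the diff:

from datasets import load_dataset

# Load a single task's predictions through the generated loading script.
ds = load_dataset("path/to/{{cookiecutter.repo_name}}.py", "tai_safety_research")

# _split_generators above only defines a "test" split.
print(ds["test"].column_names)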