LuciaTormo committed on
Commit 150a837
1 Parent(s): 335dec5

Delete COPA-ca.py

Files changed (1)
  1. COPA-ca.py +0 -92
COPA-ca.py DELETED
@@ -1,92 +0,0 @@
- # Loading script for the COPA-ca dataset.
- import json
- import datasets
-
- logger = datasets.logging.get_logger(__name__)
-
- _CITATION = ""
-
- _DESCRIPTION = """\
- The COPA-ca dataset (Choice of plausible alternatives in Catalan) is a professional translation of the English COPA dataset into Catalan, commissioned by BSC LangTech Unit. The dataset consists of 1000 premises, each given a question and two choices with a label encoding which of the choices is more plausible given the annotator.
-
- The dataset is split into 400 training samples, 100 validation samples, and 500 test samples. It includes the following features: 'premise', 'choice1', 'choice2', 'label', 'question', 'changed' (boolean).
-
- This work is licensed under a Attribution-ShareAlike 4.0 International License.
- """
-
- _HOMEPAGE = "https://zenodo.org/record/8124398"
-
-
- _URL = "https://huggingface.co/datasets/projecte-aina/copa-ca/resolve/main/"
- _TRAIN_FILE = "copa-ca.train.jsonl"
- _DEV_FILE = "copa-ca.val.jsonl"
- _TEST_FILE = "copa-ca.test.jsonl"
-
-
- class copaCaConfig(datasets.BuilderConfig):
-     """ Builder config for the COPA-ca dataset """
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for COPA-ca.
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(copaCaConfig, self).__init__(**kwargs)
-
- class copaCa(datasets.GeneratorBasedBuilder):
-     """ COPA-ca Dataset """
-
-     BUILDER_CONFIGS = [
-         copaCaConfig(
-             name="copa-ca",
-             version=datasets.Version("1.0.1"),
-             description="COPA-ca dataset",
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "premise": datasets.Value("string"),
-                     "choice1": datasets.Value("string"),
-                     "choice2": datasets.Value("string"),
-                     "question": datasets.Value("string"),
-                     'label': datasets.features.ClassLabel(names=['1', '2']),
-                     "idx": datasets.Value("int64"),
-                     "changed": datasets.Value("bool"),
-                 }
-             ),
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         urls_to_download = {
-             "train": f"{_URL}{_TRAIN_FILE}",
-             "dev": f"{_URL}{_DEV_FILE}",
-             "test": f"{_URL}{_TEST_FILE}",
-         }
-         downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-         ]
-
-     def _generate_examples(self, filepath):
-         with open(filepath, encoding='utf-8') as f:
-             for i, line in enumerate(f):
-                 data = json.loads(line)
-                 yield i, {
-                     'premise': data['premise'],
-                     'choice1': data['choice1'],
-                     'choice2': data['choice2'],
-                     'question': data['question'],
-                     'label': str(data['label']),
-                     'idx': data['idx'],
-                     'changed': data['changed']
-                 }
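
With the builder script deleted, the dataset would normally be consumed through the generic loaders in the datasets library rather than through this class. The following is a minimal sketch, not the repository's official loading path: it assumes the copa-ca.train.jsonl, copa-ca.val.jsonl, and copa-ca.test.jsonl files referenced by the deleted script remain available at the same URLs, and it feeds them to the built-in "json" builder.

# Minimal sketch (assumption: the JSONL files above are still hosted at
# the projecte-aina/copa-ca repository after this script's removal).
from datasets import load_dataset

_BASE = "https://huggingface.co/datasets/projecte-aina/copa-ca/resolve/main/"

copa_ca = load_dataset(
    "json",
    data_files={
        "train": _BASE + "copa-ca.train.jsonl",
        "validation": _BASE + "copa-ca.val.jsonl",
        "test": _BASE + "copa-ca.test.jsonl",
    },
)

# Each record carries the fields the deleted builder declared:
# premise, choice1, choice2, question, label, idx, changed.
print(copa_ca["train"][0])

Note that the generic "json" builder keeps label as the raw value stored in the JSONL files rather than the ClassLabel feature the script defined; if the repository has since been converted to plain data files with configured metadata, load_dataset("projecte-aina/copa-ca") alone should resolve the splits.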