AnnaSallesRius committed on
Commit
59e99cb
1 Parent(s): 64bc5df

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. OLD/PAWS-ca.py +97 -0
  2. OLD/dev_2k.json +0 -0
  3. OLD/test_2k.json +0 -0
  4. OLD/train.json +3 -0
OLD/PAWS-ca.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Loading script for the PAWS-ca dataset
import json

import datasets


# BibTeX citation for the dataset (left empty in this release).
_CITATION = """
"""

# Human-readable description shown on the dataset hub page.
_DESCRIPTION = """
The PAWS-ca dataset (Paraphrase Adversaries from Word Scrambling in Catalan) is a translation of the English PAWS dataset into Catalan, commissioned by BSC LangTech Unit.

This dataset contains 4,000 human translated PAWS pairs and 49,000 machine translated pairs.

"""

# NOTE(review): this URL has no record id and looks truncated — confirm the
# intended Zenodo record before publishing.
_HOMEPAGE = "https://zenodo.org/record/"

# Base URL where the split files are hosted, plus the per-split file names.
_URL = "https://huggingface.co/datasets/projecte-aina/paws-ca/resolve/main/"
_TRAIN_FILE = "train.json"
_DEV_FILE = "dev_2k.json"
_TEST_FILE = "test_2k.json"
25
class PAWSXConfig(datasets.BuilderConfig):
    """BuilderConfig for PAWS-ca.

    Pins the dataset version to 1.1.0 and forwards every other setting
    to ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        """Constructs a PAWSXConfig.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # BUG FIX: the original statement ended with a stray trailing comma,
        # which turned the super().__init__(...) call into a discarded
        # one-element tuple expression. Harmless at runtime but misleading;
        # removed. Also modernized to the zero-argument super() form.
        super().__init__(version=datasets.Version("1.1.0", ""), **kwargs)
34
+
35
+
36
class PAWSX(datasets.GeneratorBasedBuilder):
    """PAWS-ca, a Catalan version of PAWS."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        PAWSXConfig(
            name="paws-ca",
            description="PAWS-ca dataset",
        ),
    ]

    def _info(self):
        """Return the DatasetInfo describing PAWS-ca's columns.

        Each example is a sentence pair plus a binary paraphrase label.
        """
        return datasets.DatasetInfo(
            # Description shown on the dataset page.
            description=_DESCRIPTION,
            # Column names and types of every example.
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "sentence1": datasets.Value("string"),
                    "sentence2": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=["0", "1"]),
                }
            ),
            # No canonical (input, target) tuple is declared for
            # as_supervised=True consumers.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Download the three JSON split files and return SplitGenerators."""
        remote_files = {
            "train": _URL + _TRAIN_FILE,
            "dev": _URL + _DEV_FILE,
            "test": _URL + _TEST_FILE,
        }
        local_paths = dl_manager.download_and_extract(remote_files)

        # Map each datasets split constant to the key used above.
        split_to_key = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split, gen_kwargs={"filepath": local_paths[key]}
            )
            for split, key in split_to_key
        ]

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs read from one JSON split file."""
        with open(filepath, encoding="utf-8") as handle:
            rows = json.load(handle)
        for idx, entry in enumerate(rows):
            yield idx, {
                "id": entry["id"],
                "sentence1": entry["sentence1"],
                "sentence2": entry["sentence2"],
                "label": entry["label"],
            }
96
+
97
+
OLD/dev_2k.json ADDED
The diff for this file is too large to render. See raw diff
 
OLD/test_2k.json ADDED
The diff for this file is too large to render. See raw diff
 
OLD/train.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c504c5b9c2d358575bd711c043326910b0c34fc490ab0fcbce46fe2e18cb6ad
3
+ size 17291348