Sagnik Ray Choudhury committed on
Commit
1c0cd8c
1 Parent(s): 5c99d5b

first commit

Browse files
Files changed (5) hide show
  1. README.md +5 -0
  2. snli-bt.py +72 -0
  3. test.jsonl +3 -0
  4. train.jsonl +3 -0
  5. validation.jsonl +3 -0
README.md CHANGED
@@ -1,3 +1,8 @@
 
1
  ---
2
  license: afl-3.0
3
  ---
 
 
 
 
 
1
+ ---
2
+ license: afl-3.0
3
+ ---
4
+ ### Dataset Card for SNLI Back Translation
5
+ back translation of SNLI dataset: only use the test version
snli-bt.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import json

import datasets


# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)

# Base URL of the hosted dataset repository on the Hugging Face Hub.
_URL = "https://huggingface.co/datasets/sagnikrayc/snli-bt/resolve/main"

# One JSONL file per split, all served from the same repository.
_URLS = {split: f"{_URL}/{split}.jsonl" for split in ("train", "validation", "test")}
13
+
14
+
15
+ class SnliBTConfig(datasets.BuilderConfig):
16
+ def __init__(self, **kwargs):
17
+ super(SnliBTConfig, self).__init__(**kwargs)
18
+
19
+
class SnliCF(datasets.GeneratorBasedBuilder):
    """SNLI-BT: a back-translated version of the SNLI dataset.

    Loads three JSONL splits (train/validation/test) hosted on the Hub.
    NOTE(review): the class name says "CF" while the dataset is SNLI-BT —
    kept as-is because renaming could break external references.
    """

    BUILDER_CONFIGS = [
        SnliBTConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

    def _info(self):
        """Return dataset metadata: one string-valued feature per JSONL field."""
        return datasets.DatasetInfo(
            description="NA",
            features=datasets.Features(
                {
                    "idx": datasets.Value("string"),
                    "premise": datasets.Value("string"),
                    "hypothesis": datasets.Value("string"),
                    "label": datasets.Value("string"),
                    "_type": datasets.Value("string"),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download the three JSONL files and map each to its standard split."""
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}
            ),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(index, example)`` pairs from a JSONL file.

        Each non-blank line must be a JSON object with keys ``idx``,
        ``premise``, ``hypothesis``, ``label`` and ``_type``.
        """
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as rf:
            for idx, line in enumerate(rf):
                # Skip blank lines. The original guard `if line:` let a bare
                # "\n" through (it is truthy), which would crash json.loads
                # on files containing empty lines.
                if line.strip():
                    _line = json.loads(line)
                    yield idx, {
                        "premise": _line["premise"],
                        "hypothesis": _line["hypothesis"],
                        "idx": _line["idx"],
                        "_type": _line["_type"],
                        "label": _line["label"],
                    }
test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e0ffeb1198abdd717a20257ed3067ba93422b9028835d4d76017fec5041df01
3
+ size 3860362
train.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e0ffeb1198abdd717a20257ed3067ba93422b9028835d4d76017fec5041df01
3
+ size 3860362
validation.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e0ffeb1198abdd717a20257ed3067ba93422b9028835d4d76017fec5041df01
3
+ size 3860362