shibing624 committed on
Commit
4aa6a91
1 Parent(s): bc034d8

Create nli-zh-all.py

Files changed (1)
  1. nli-zh-all.py +109 -0
nli-zh-all.py ADDED
@@ -0,0 +1,109 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """The Chinese Natural Language Inference (NLI-zh-all) Corpus.
+ upload: https://github.com/shibing624
+ """
+
+
+ import json
+
+ import datasets
+
+
+ _CITATION = """\
+ @misc{Text2vec,
+   author = {Xu, Ming},
+   title = {Text2vec: Text to vector toolkit},
+   year = {2022},
+   publisher = {GitHub},
+   journal = {GitHub repository},
+   howpublished = {\\url{https://github.com/shibing624/text2vec}},
+ }
+ """
+
+ _DESCRIPTION = """\
+ The NLI-zh-all corpus is a merged Chinese sentence-similarity dataset, supporting the task of natural language
+ inference (NLI), also known as recognizing textual entailment (RTE).
+ """
+
+ _DATA_URL = "https://huggingface.co/datasets/shibing624/nli-zh-all/resolve/main/sampled_data"
+
+
+ class Nli(datasets.GeneratorBasedBuilder):
+     """The Chinese Natural Language Inference (NLI-zh-all) Corpus."""
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="plain_text",
+             version=datasets.Version("1.0.0", ""),
+             description="Plain text import of NLI-zh-all",
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text1": datasets.Value("string"),
+                     "text2": datasets.Value("string"),
+                     "label": datasets.Value("int64"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/shibing624/text2vec",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Sampled JSONL shards of the merged corpus, one file per source dataset.
+         files = [
+             'simclue-train-2k.jsonl',
+             'nli_zh-train-25k.jsonl',
+             'alpaca_gpt4-train-2k.jsonl',
+             'cmrc2018-train-2k.jsonl',
+             'snli_zh-train-5k.jsonl',
+             'chatmed_consult-train-500.jsonl',
+             'zhihu_kol-train-2k.jsonl',
+             'cblue_chip_sts-train-2k.jsonl',
+             'csl-train-500.jsonl',
+             'webqa-train-500.jsonl',
+             'xlsum-train-1k.jsonl',
+         ]
+         data_files = [f"{_DATA_URL}/{i}" for i in files]
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": dl_manager.download_and_extract(data_files)}
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         """This function returns the examples in the raw (text) form."""
+         idx = 0
+         if isinstance(filepath, str):
+             filepath = [filepath]
+         for file in filepath:
+             with open(file, encoding="utf-8") as f:
+                 # Each line is a JSON object with "text1", "text2" and "label" fields.
+                 for row in f:
+                     data = json.loads(row)
+                     yield idx, {
+                         "text1": data["text1"],
+                         "text2": data["text2"],
+                         "label": data["label"],
+                     }
+                     idx += 1
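
For reference, a minimal usage sketch, not part of this commit: once this script lands in the shibing624/nli-zh-all dataset repo, the train split it defines can be loaded with the `datasets` library roughly as follows (recent library versions require opting in to community loading scripts).

from datasets import load_dataset

# Hypothetical usage sketch: load the train split defined by nli-zh-all.py.
# On recent `datasets` versions, trust_remote_code=True is needed to run
# a community loading script like this one.
dataset = load_dataset("shibing624/nli-zh-all", trust_remote_code=True)

# Each example carries the three features declared in _info().
print(dataset["train"][0])  # {'text1': ..., 'text2': ..., 'label': ...}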