# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""The Chinese Natural Language Inference (NLI-zh-all) Corpus.

upload: https://github.com/shibing624
"""

import json

import datasets

_CITATION = """https://github.com/shibing624/text2vec"""

_DESCRIPTION = """\
The NLI-zh-all corpus (version 1.0) is a merged Chinese sentence-similarity dataset,
supporting the task of natural language inference (NLI), also known as recognizing
textual entailment (RTE).
"""

_DATA_URL = "https://huggingface.co/datasets/shibing624/nli-zh-all/resolve/main/sampled_data"


class Nli(datasets.GeneratorBasedBuilder):
    """The Chinese Natural Language Inference (NLI-zh-all) Corpus."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text import of NLI-zh-all",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text1": datasets.Value("string"),
                    "text2": datasets.Value("string"),
                    "label": datasets.Value("int64"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/shibing624/text2vec",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Sampled JSONL shards hosted under _DATA_URL; all of them feed the train split.
        files = [
            "simclue-train-2k.jsonl",
            "nli_zh-train-25k.jsonl",
            "alpaca_gpt4-train-2k.jsonl",
            "cmrc2018-train-2k.jsonl",
            "snli_zh-train-5k.jsonl",
            "chatmed_consult-train-500.jsonl",
            "zhihu_kol-train-2k.jsonl",
            "cblue_chip_sts-train-2k.jsonl",
            "csl-train-500.jsonl",
            "webqa-train-500.jsonl",
            "xlsum-train-1k.jsonl",
        ]
        data_files = [f"{_DATA_URL}/{name}" for name in files]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": dl_manager.download_and_extract(data_files)},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields the examples in raw (text) form from the downloaded JSONL files."""
        idx = 0
        if isinstance(filepath, str):
            filepath = [filepath]
        for file in filepath:
            with open(file, encoding="utf-8") as f:
                for row in f:
                    data = json.loads(row)
                    yield idx, {
                        "text1": data["text1"],
                        "text2": data["text2"],
                        "label": data["label"],
                    }
                    idx += 1
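

if __name__ == "__main__":
    # Usage sketch, not part of the builder itself. It assumes the dataset is
    # published on the Hugging Face Hub as "shibing624/nli-zh-all" (the id used
    # in _DATA_URL above); newer `datasets` releases may additionally require
    # `trust_remote_code=True` for script-based datasets.
    from datasets import load_dataset

    ds = load_dataset("shibing624/nli-zh-all", "plain_text", split="train")
    print(ds[0])  # expected keys: "text1", "text2", "label"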