# -*- coding: utf-8 -*-
"""
@author: XuMing (xuming624@qq.com)
@description: Dataset from the 2021 Sohu Campus Text Matching Algorithm Competition
upload: https://github.com/shibing624
"""
import json

import datasets

_CITATION = """https://github.com/shibing624/text2vec"""

_DESCRIPTION = """\
Dataset from the 2021 Sohu Campus Text Matching Algorithm Competition (sts-sohu2021).
"""

_DATA_URL = "https://huggingface.co/datasets/shibing624/sts-sohu2021/resolve/main/"


class Sohu(datasets.GeneratorBasedBuilder):
    """Chinese text matching corpus from the 2021 Sohu competition (sts-sohu2021)."""

    # Six configs, one per subset file (<name>.jsonl) hosted at _DATA_URL. The names
    # presumably encode the competition's pair types (dd/dc/cc) and label standards (a/b).
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="dda",
            version=datasets.Version("1.0.0", ""),
            description="Plain text import of sts-sohu2021",
        ),
        datasets.BuilderConfig(
            name="ddb",
            version=datasets.Version("1.0.0", ""),
            description="Plain text import of sts-sohu2021",
        ),
        datasets.BuilderConfig(
            name="dca",
            version=datasets.Version("1.0.0", ""),
            description="Plain text import of sts-sohu2021",
        ),
        datasets.BuilderConfig(
            name="dcb",
            version=datasets.Version("1.0.0", ""),
            description="Plain text import of sts-sohu2021",
        ),
        datasets.BuilderConfig(
            name="cca",
            version=datasets.Version("1.0.0", ""),
            description="Plain text import of sts-sohu2021",
        ),
        datasets.BuilderConfig(
            name="ccb",
            version=datasets.Version("1.0.0", ""),
            description="Plain text import of sts-sohu2021",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "sentence1": datasets.Value("string"),
                    "sentence2": datasets.Value("string"),
                    "label": datasets.Value("int32"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/shibing624/text2vec",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the JSONL file for the selected config, e.g. "dda.jsonl".
        dl_file = dl_manager.download_and_extract(f"{_DATA_URL}{self.config.name}.jsonl")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": dl_file}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield examples from one or more JSONL files in the raw (text) form."""
        idx = 0
        if isinstance(filepath, str):
            filepath = [filepath]
        for file in filepath:
            with open(file, encoding="utf-8") as f:
                for row in f:
                    row = row.strip()
                    if not row:
                        continue
                    data = json.loads(row)
                    yield idx, {
                        "sentence1": data["sentence1"],
                        "sentence2": data["sentence2"],
                        "label": int(data["label"]),
                    }
                    idx += 1
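

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the loading script itself: load one of the
    # six configs ("dda" is an arbitrary choice) from the Hub and print the first row.
    # Depending on the installed `datasets` version, loading a script-backed dataset may
    # also require passing `trust_remote_code=True`.
    ds = datasets.load_dataset("shibing624/sts-sohu2021", "dda", split="train")
    print(ds[0])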