Datasets:

Modalities:
Text
Formats:
json
Languages:
Chinese
ArXiv:
Libraries:
Datasets
pandas
License:
File size: 1,599 Bytes
f55a5f7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46



from datasets import DatasetInfo, Features, Split, SplitGenerator, GeneratorBasedBuilder, Value, Sequence
import json

class MyDataset(GeneratorBasedBuilder):
    """Builder for a JSONL question/answer dataset (HuatuoGPT-style).

    Each line of the split files is a JSON object with ``"questions"`` and
    ``"answers"`` keys, each holding a list of strings.
    """

    def _info(self):
        """Return the dataset metadata: feature schema, supervised keys, homepage."""
        feature_schema = Features(
            {
                "questions": Sequence(Value("string")),
                "answers": Sequence(Value("string")),
            }
        )
        return DatasetInfo(
            features=feature_schema,
            supervised_keys=("questions", "answers"),
            homepage="https://github.com/FreedomIntelligence/HuatuoGPT",
            citation="...",
        )

    def _split_generators(self, dl_manager):
        """Map each split to its local JSONL file.

        NOTE(review): ``dl_manager`` is unused — the files are assumed to sit
        next to the script rather than be downloaded.
        """
        split_to_path = (
            (Split.TRAIN, "train_datasets.jsonl"),
            (Split.VALIDATION, "validation_datasets.jsonl"),
            (Split.TEST, "test_datasets.jsonl"),
        )
        return [
            SplitGenerator(name=split_name, gen_kwargs={"filepath": path})
            for split_name, path in split_to_path
        ]

    def _generate_examples(self, filepath):
        """Yield ``(index, example)`` pairs parsed line-by-line from a JSONL file."""
        with open(filepath, encoding="utf-8") as handle:
            for index, line in enumerate(handle):
                record = json.loads(line)
                yield index, {
                    "questions": record["questions"],
                    "answers": record["answers"],
                }

if __name__ == '__main__':
    from datasets import load_dataset

    # Smoke-test the builder by loading every split through the datasets API.
    dataset = load_dataset("my_dataset.py")

    # Print the loaded DatasetDict so the run reports split names and sizes.
    # (The original bare `print()` emitted only a blank line, giving no
    # feedback on whether the load succeeded.)
    print(dataset)