import json

from datasets import DatasetInfo, Features, Split, SplitGenerator, GeneratorBasedBuilder, Value, Sequence


class MyDataset(GeneratorBasedBuilder):
    """Question/answer records stored as one JSON object per line (JSON Lines)."""

    def _info(self):
        # Each record holds parallel lists of question and answer strings.
        return DatasetInfo(
            features=Features({
                "questions": Sequence(Value("string")),
                "answers": Sequence(Value("string")),
            }),
            supervised_keys=("questions", "answers"),
            homepage="https://github.com/FreedomIntelligence/HuatuoGPT",
            citation="...",
        )

    def _split_generators(self, dl_manager):
        # One local JSONL file per split; dl_manager is not used here.
        train_path = "train_datasets.jsonl"
        validation_path = "validation_datasets.jsonl"
        test_path = "test_datasets.jsonl"

        return [
            SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": train_path}),
            SplitGenerator(name=Split.VALIDATION, gen_kwargs={"filepath": validation_path}),
            SplitGenerator(name=Split.TEST, gen_kwargs={"filepath": test_path}),
        ]
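
    # NOTE: the on-disk layout is an assumption, not confirmed by the original
    # script. A minimal setup that would satisfy the relative paths above is to
    # keep these files together in the directory the loader is run from:
    #
    #   my_dataset.py
    #   train_datasets.jsonl
    #   validation_datasets.jsonl
    #   test_datasets.jsonl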

    def _generate_examples(self, filepath):
        # Stream the split file line by line; each line is one JSON-encoded record.
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                yield id_, {
                    "questions": data["questions"],
                    "answers": data["answers"],
                }
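
    # Assumed row format (illustrative only; the field contents below are
    # hypothetical, not taken from the actual data):
    #   {"questions": ["What can cause a persistent cough?"],
    #    "answers": ["Common causes include postnasal drip, asthma, and reflux."]}
    # i.e. one JSON object per line with parallel lists of strings, matching the
    # Features schema declared in _info().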


if __name__ == "__main__":
    from datasets import load_dataset

    # Build all three splits from the local JSONL files and print a summary.
    dataset = load_dataset("my_dataset.py")
    print(dataset)
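
    # A quick sanity check, assuming the JSONL files exist and parse cleanly:
    # inspect the first training record to confirm the schema round-trips.
    print(dataset["train"][0])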