Modalities: Text
Formats: json
Languages: Chinese
Libraries: Datasets, pandas
AnonNo2 committed
Commit ede0033
1 Parent(s): aad7903

first commit

Files changed (2):
  1. my_dataset.py +46 -0
  2. test_datasets.jsonl +0 -0
my_dataset.py ADDED
@@ -0,0 +1,46 @@
+from datasets import DatasetInfo, Features, Split, SplitGenerator, GeneratorBasedBuilder, Value, Sequence
+import json
+
+
+class MyDataset(GeneratorBasedBuilder):
+    def _info(self):
+        return DatasetInfo(
+            features=Features({
+                "questions": Sequence(Value("string")),
+                "answers": Sequence(Value("string")),
+            }),
+            supervised_keys=("questions", "answers"),
+            homepage="https://github.com/FreedomIntelligence/HuatuoGPT",
+            citation="...",
+        )
+
+    def _split_generators(self, dl_manager):
+        # The three split files are expected to sit next to this script.
+        train_path = "train_datasets.jsonl"
+        validation_path = "validation_datasets.jsonl"
+        test_path = "test_datasets.jsonl"
+
+        return [
+            SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": train_path}),
+            SplitGenerator(name=Split.VALIDATION, gen_kwargs={"filepath": validation_path}),
+            SplitGenerator(name=Split.TEST, gen_kwargs={"filepath": test_path}),
+        ]
+
+    def _generate_examples(self, filepath):
+        with open(filepath, encoding="utf-8") as f:
+            for id_, row in enumerate(f):
+                # Each JSONL line is a JSON object with "questions" and
+                # "answers" lists; parse it and map it onto the features.
+                data = json.loads(row)
+                yield id_, {
+                    "questions": data["questions"],
+                    "answers": data["answers"],
+                }
+
+
+if __name__ == "__main__":
+    # Quick smoke test: build the dataset from this script.
+    from datasets import load_dataset
+
+    dataset = load_dataset("my_dataset.py")
+    print(dataset)
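For context, a minimal sketch of how the builder above would be consumed. It assumes train_datasets.jsonl and validation_datasets.jsonl also exist next to the script; this commit adds only test_datasets.jsonl, so preparing all three splits will fail until the other two files are committed.

from datasets import load_dataset

# Build all splits declared in _split_generators() from the local script.
dataset = load_dataset("my_dataset.py")
print(dataset)             # DatasetDict with train/validation/test splits
print(dataset["test"][0])  # {"questions": [...], "answers": [...]}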
test_datasets.jsonl ADDED
The diff for this file is too large to render.
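Since the test_datasets.jsonl diff is not rendered, here is a hypothetical sketch of the per-line record layout implied by the Features declared in _info(): each line is a JSON object holding parallel lists of question and answer strings. The values below are illustrative, not taken from the file.

import json

# Hypothetical record matching the declared schema (illustrative values).
record = {
    "questions": ["What are common symptoms of influenza?"],
    "answers": ["Typical symptoms include fever, cough, and fatigue."],
}

# One such object per line, as _generate_examples() expects.
print(json.dumps(record, ensure_ascii=False))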