XciD HF staff XiangBo Admin committed on
Commit
b01fb01
0 Parent(s):

Squashing commit

Browse files

Co-authored-by: XiangBo <XiangBo@users.noreply.huggingface.co>
Co-authored-by: Admin <Admin@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
+ *.model filter=lfs diff=lfs merge=lfs -text
14
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
15
+ *.npy filter=lfs diff=lfs merge=lfs -text
16
+ *.npz filter=lfs diff=lfs merge=lfs -text
17
+ *.onnx filter=lfs diff=lfs merge=lfs -text
18
+ *.ot filter=lfs diff=lfs merge=lfs -text
19
+ *.parquet filter=lfs diff=lfs merge=lfs -text
20
+ *.pb filter=lfs diff=lfs merge=lfs -text
21
+ *.pickle filter=lfs diff=lfs merge=lfs -text
22
+ *.pkl filter=lfs diff=lfs merge=lfs -text
23
+ *.pt filter=lfs diff=lfs merge=lfs -text
24
+ *.pth filter=lfs diff=lfs merge=lfs -text
25
+ *.rar filter=lfs diff=lfs merge=lfs -text
26
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
27
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ # Audio files - uncompressed
37
+ *.pcm filter=lfs diff=lfs merge=lfs -text
38
+ *.sam filter=lfs diff=lfs merge=lfs -text
39
+ *.raw filter=lfs diff=lfs merge=lfs -text
40
+ # Audio files - compressed
41
+ *.aac filter=lfs diff=lfs merge=lfs -text
42
+ *.flac filter=lfs diff=lfs merge=lfs -text
43
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
44
+ *.ogg filter=lfs diff=lfs merge=lfs -text
45
+ *.wav filter=lfs diff=lfs merge=lfs -text
46
+ # Image files - uncompressed
47
+ *.bmp filter=lfs diff=lfs merge=lfs -text
48
+ *.gif filter=lfs diff=lfs merge=lfs -text
49
+ *.png filter=lfs diff=lfs merge=lfs -text
50
+ *.tiff filter=lfs diff=lfs merge=lfs -text
51
+ # Image files - compressed
52
+ *.jpg filter=lfs diff=lfs merge=lfs -text
53
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
54
+ *.webp filter=lfs diff=lfs merge=lfs -text
55
+ test_datasets.jsonl filter=lfs diff=lfs merge=lfs -text
56
+ train_datasets.jsonl filter=lfs diff=lfs merge=lfs -text
57
+ validation_datasets.jsonl filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ task_categories:
4
+ - text-generation
5
+ language:
6
+ - zh
7
+ tags:
8
+ - medical
9
+ size_categories:
10
+ - 100K<n<1M
11
+ ---
12
+ # Dataset Card for Huatuo_knowledge_graph_qa
13
+
14
+
15
+
16
+ ## Dataset Description
17
+
18
+ - **Homepage: https://www.huatuogpt.cn/**
19
+ - **Repository: https://github.com/FreedomIntelligence/HuatuoGPT**
20
+ - **Paper: https://arxiv.org/abs/2305.01526**
21
+ - **Leaderboard:**
22
+ - **Point of Contact:**
23
+
24
+
25
+
26
+ ### Dataset Summary
27
+
28
+ We built this QA dataset from a medical knowledge graph, for a total of 798,444 entries. The questions are constructed from templates, and the answers are the contents of the corresponding knowledge-graph entries.
29
+
30
+
31
+
32
+
33
+
34
+
35
+
36
+ ## Dataset Creation
37
+
38
+
39
+
40
+
41
+ ### Source Data
42
+
43
+ https://cpubmed.openi.org.cn/graph/wiki
44
+ https://github.com/zhihao-chen/QASystemOnMedicalGraph
45
+ https://github.com/baiyang2464/chatbot-base-on-Knowledge-Graph
46
+
47
+
48
+
49
+
50
+
51
+ ## Citation
52
+
53
+ ```
54
+ @misc{li2023huatuo26m,
55
+ title={Huatuo-26M, a Large-scale Chinese Medical QA Dataset},
56
+ author={Jianquan Li and Xidong Wang and Xiangbo Wu and Zhiyi Zhang and Xiaolong Xu and Jie Fu and Prayag Tiwari and Xiang Wan and Benyou Wang},
57
+ year={2023},
58
+ eprint={2305.01526},
59
+ archivePrefix={arXiv},
60
+ primaryClass={cs.CL}
61
+ }
62
+ ```
my_dataset.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+
4
+ from datasets import DatasetInfo, Features, Split, SplitGenerator, GeneratorBasedBuilder, Value, Sequence
5
+ import json
6
+
7
class MyDataset(GeneratorBasedBuilder):
    """Builder for the Huatuo knowledge-graph QA dataset.

    Reads three local JSONL files (train/validation/test); each line is a JSON
    object with ``questions`` and ``answers`` lists of strings.
    """

    def _info(self):
        """Return the dataset metadata: schema, supervised keys, homepage, citation."""
        return DatasetInfo(
            features=Features({
                "questions": Sequence(Value("string")),
                "answers": Sequence(Value("string")),
            }),
            supervised_keys=("questions", "answers"),
            homepage="https://github.com/FreedomIntelligence/Huatuo-26M",
            citation='''
            @misc{li2023huatuo26m,
            title={Huatuo-26M, a Large-scale Chinese Medical QA Dataset},
            author={Jianquan Li and Xidong Wang and Xiangbo Wu and Zhiyi Zhang and Xiaolong Xu and Jie Fu and Prayag Tiwari and Xiang Wan and Benyou Wang},
            year={2023},
            eprint={2305.01526},
            archivePrefix={arXiv},
            primaryClass={cs.CL}
            }

            ''',
        )

    def _split_generators(self, dl_manager):
        """Resolve the split files and return one SplitGenerator per split.

        Routing the relative filenames through ``dl_manager.download`` (instead
        of passing them straight to ``open``) makes the paths resolve both when
        the script runs next to the data files and when it is fetched from the
        Hub into a cache directory.
        """
        paths = dl_manager.download({
            "train": "train_datasets.jsonl",
            "validation": "validation_datasets.jsonl",
            "test": "test_datasets.jsonl",
        })
        return [
            SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": paths["train"]}),
            SplitGenerator(name=Split.VALIDATION, gen_kwargs={"filepath": paths["validation"]}),
            SplitGenerator(name=Split.TEST, gen_kwargs={"filepath": paths["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, example)`` pairs from a JSONL file, one JSON object per line.

        Blank lines (e.g. a trailing newline at end of file) are skipped rather
        than raising ``json.JSONDecodeError``; ids stay unique because they come
        from ``enumerate`` over the raw line stream.
        """
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                row = row.strip()
                if not row:  # tolerate empty / trailing lines in the JSONL
                    continue
                data = json.loads(row)
                yield id_, {
                    "questions": data["questions"],
                    "answers": data["answers"],
                }
50
+
51
if __name__ == '__main__':
    # Smoke test: build the dataset from this script and show the resulting
    # splits. The original ended in a bare print() that emitted nothing.
    from datasets import load_dataset

    dataset = load_dataset("my_dataset.py")
    print(dataset)
test_datasets.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56d4dc9f0fdf5dd43ee652ea6bd1567d698cf2a9d7b288f00e203a18e0bda82f
3
+ size 174081
train_datasets.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7716b46897e2dbef177af91670b2afd1d4989582bc237465a7fc574cc8269c1f
3
+ size 148347165
validation_datasets.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd5920e32f7343a31e2501b49b3e4061f703b6fb537e54881905a0a7b5bf8c39
3
+ size 321211