---
configs:
  - config_name: emp_H3
    data_files:
      - split: train
        path: "GUE/emp_H3/train.csv"
      - split: test
        path: "GUE/emp_H3/test.csv"
      - split: dev
        path: "GUE/emp_H3/dev.csv"
  - config_name: emp_H3K14ac
    data_files:
      - split: train
        path: "GUE/emp_H3K14ac/train.csv"
      - split: test
        path: "GUE/emp_H3K14ac/test.csv"
      - split: dev
        path: "GUE/emp_H3K14ac/dev.csv"
  - config_name: emp_H3K36me3
    data_files:
      - split: train
        path: "GUE/emp_H3K36me3/train.csv"
      - split: test
        path: "GUE/emp_H3K36me3/test.csv"
      - split: dev
        path: "GUE/emp_H3K36me3/dev.csv"
  - config_name: emp_H3K4me1
    data_files:
      - split: train
        path: "GUE/emp_H3K4me1/train.csv"
      - split: test
        path: "GUE/emp_H3K4me1/test.csv"
      - split: dev
        path: "GUE/emp_H3K4me1/dev.csv"
  - config_name: emp_H3K4me2
    data_files:
      - split: train
        path: "GUE/emp_H3K4me2/train.csv"
      - split: test
        path: "GUE/emp_H3K4me2/test.csv"
      - split: dev
        path: "GUE/emp_H3K4me2/dev.csv"
  - config_name: emp_H3K4me3
    data_files:
      - split: train
        path: "GUE/emp_H3K4me3/train.csv"
      - split: test
        path: "GUE/emp_H3K4me3/test.csv"
      - split: dev
        path: "GUE/emp_H3K4me3/dev.csv"
  - config_name: emp_H3K79me3
    data_files:
      - split: train
        path: "GUE/emp_H3K79me3/train.csv"
      - split: test
        path: "GUE/emp_H3K79me3/test.csv"
      - split: dev
        path: "GUE/emp_H3K79me3/dev.csv"
  - config_name: emp_H3K9ac
    data_files:
      - split: train
        path: "GUE/emp_H3K9ac/train.csv"
      - split: test
        path: "GUE/emp_H3K9ac/test.csv"
      - split: dev
        path: "GUE/emp_H3K9ac/dev.csv"
  - config_name: emp_H4
    data_files:
      - split: train
        path: "GUE/emp_H4/train.csv"
      - split: test
        path: "GUE/emp_H4/test.csv"
      - split: dev
        path: "GUE/emp_H4/dev.csv"
  - config_name: emp_H4ac
    data_files:
      - split: train
        path: "GUE/emp_H4ac/train.csv"
      - split: test
        path: "GUE/emp_H4ac/test.csv"
      - split: dev
        path: "GUE/emp_H4ac/dev.csv"
  - config_name: human_tf_0
    data_files:
      - split: train
        path: "GUE/human_tf_0/train.csv"
      - split: test
        path: "GUE/human_tf_0/test.csv"
      - split: dev
        path: "GUE/human_tf_0/dev.csv"
  - config_name: human_tf_1
    data_files:
      - split: train
        path: "GUE/human_tf_1/train.csv"
      - split: test
        path: "GUE/human_tf_1/test.csv"
      - split: dev
        path: "GUE/human_tf_1/dev.csv"
  - config_name: human_tf_2
    data_files:
      - split: train
        path: "GUE/human_tf_2/train.csv"
      - split: test
        path: "GUE/human_tf_2/test.csv"
      - split: dev
        path: "GUE/human_tf_2/dev.csv"
  - config_name: human_tf_3
    data_files:
      - split: train
        path: "GUE/human_tf_3/train.csv"
      - split: test
        path: "GUE/human_tf_3/test.csv"
      - split: dev
        path: "GUE/human_tf_3/dev.csv"
  - config_name: human_tf_4
    data_files:
      - split: train
        path: "GUE/human_tf_4/train.csv"
      - split: test
        path: "GUE/human_tf_4/test.csv"
      - split: dev
        path: "GUE/human_tf_4/dev.csv"
  - config_name: mouse_0
    data_files:
      - split: train
        path: "GUE/mouse_0/train.csv"
      - split: test
        path: "GUE/mouse_0/test.csv"
      - split: dev
        path: "GUE/mouse_0/dev.csv"
  - config_name: mouse_1
    data_files:
      - split: train
        path: "GUE/mouse_1/train.csv"
      - split: test
        path: "GUE/mouse_1/test.csv"
      - split: dev
        path: "GUE/mouse_1/dev.csv"
  - config_name: mouse_2
    data_files:
      - split: train
        path: "GUE/mouse_2/train.csv"
      - split: test
        path: "GUE/mouse_2/test.csv"
      - split: dev
        path: "GUE/mouse_2/dev.csv"
  - config_name: mouse_3
    data_files:
      - split: train
        path: "GUE/mouse_3/train.csv"
      - split: test
        path: "GUE/mouse_3/test.csv"
      - split: dev
        path: "GUE/mouse_3/dev.csv"
  - config_name: mouse_4
    data_files:
      - split: train
        path: "GUE/mouse_4/train.csv"
      - split: test
        path: "GUE/mouse_4/test.csv"
      - split: dev
        path: "GUE/mouse_4/dev.csv"
  - config_name: prom_300_all
    data_files:
      - split: train
        path: "GUE/prom_300_all/train.csv"
      - split: test
        path: "GUE/prom_300_all/test.csv"
      - split: dev
        path: "GUE/prom_300_all/dev.csv"
  - config_name: prom_300_notata
    data_files:
      - split: train
        path: "GUE/prom_300_notata/train.csv"
      - split: test
        path: "GUE/prom_300_notata/test.csv"
      - split: dev
        path: "GUE/prom_300_notata/dev.csv"
  - config_name: prom_300_tata
    data_files:
      - split: train
        path: "GUE/prom_300_tata/train.csv"
      - split: test
        path: "GUE/prom_300_tata/test.csv"
      - split: dev
        path: "GUE/prom_300_tata/dev.csv"
  - config_name: prom_core_all
    data_files:
      - split: train
        path: "GUE/prom_core_all/train.csv"
      - split: test
        path: "GUE/prom_core_all/test.csv"
      - split: dev
        path: "GUE/prom_core_all/dev.csv"
  - config_name: prom_core_notata
    data_files:
      - split: train
        path: "GUE/prom_core_notata/train.csv"
      - split: test
        path: "GUE/prom_core_notata/test.csv"
      - split: dev
        path: "GUE/prom_core_notata/dev.csv"
  - config_name: prom_core_tata
    data_files:
      - split: train
        path: "GUE/prom_core_tata/train.csv"
      - split: test
        path: "GUE/prom_core_tata/test.csv"
      - split: dev
        path: "GUE/prom_core_tata/dev.csv"
  - config_name: splice_reconstructed
    data_files:
      - split: train
        path: "GUE/splice_reconstructed/train.csv"
      - split: test
        path: "GUE/splice_reconstructed/test.csv"
      - split: dev
        path: "GUE/splice_reconstructed/dev.csv"
  - config_name: virus_covid
    data_files:
      - split: train
        path: "GUE/virus_covid/train.csv"
      - split: test
        path: "GUE/virus_covid/test.csv"
      - split: dev
        path: "GUE/virus_covid/dev.csv"
---

This is a copy of the Genome Understanding Evaluation (GUE) benchmark presented in *DNABERT-2: Efficient Foundation Model and Benchmark For Multi-Species Genome* by Zhihan Zhou, Yanrong Ji, Weijian Li, Pratik Dutta, Ramana Davuluri, and Han Liu. The original data can be downloaded directly from https://github.com/MAGICS-LAB/DNABERT_2.

If you use this dataset, please cite:

```
@misc{zhou2023dnabert2,
  title={DNABERT-2: Efficient Foundation Model and Benchmark For Multi-Species Genome},
  author={Zhihan Zhou and Yanrong Ji and Weijian Li and Pratik Dutta and Ramana Davuluri and Han Liu},
  year={2023},
  eprint={2306.15006},
  archivePrefix={arXiv},
  primaryClass={q-bio.GN}
}
```

**Instructions to Load the Dataset in Google Colab**

```
# Choose the configuration that you wish to load, e.g. prom_core_all.
from datasets import load_dataset, get_dataset_config_names

# List all available configurations (tasks) in this dataset.
config_names = get_dataset_config_names("leannmlindsey/GUE")
print(config_names)

# Load one configuration; it provides train, test, and dev splits.
prom_core_all = load_dataset("leannmlindsey/GUE", name="prom_core_all")
print(prom_core_all)

# Inspect the first training example.
print(prom_core_all["train"][0])
```
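The loaded splits are standard 🤗 `datasets.Dataset` objects, so they can be converted to pandas for a quick sanity check. The sketch below is a minimal example, assuming the per-task CSVs expose `sequence` and `label` columns; that column naming is an assumption rather than something stated above, so verify it with `column_names` first.

```
# Minimal inspection sketch. Assumes the CSV columns are named "sequence"
# and "label" -- an assumption; confirm with column_names below.
from datasets import load_dataset

prom_core_all = load_dataset("leannmlindsey/GUE", name="prom_core_all")
print(prom_core_all["train"].column_names)          # confirm the actual column names

train_df = prom_core_all["train"].to_pandas()
print(train_df["label"].value_counts())             # class balance of the train split
print(train_df["sequence"].str.len().describe())    # sequence length statistics
```

The same pattern applies to the `dev` and `test` splits of any configuration listed above.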