#!/usr/bin/env python3
"""Sanity check for DialogStudio: load each dataset config and record its split sizes."""
import csv
import random

from datasets import load_dataset

random.seed(42)

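# DialogStudio configs to check, grouped by task category; each name is passed to
# load_dataset("Salesforce/dialogstudio", <config_name>) as the config name.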
DATASETS = {
"natural_language_understanding": [
"ATIS", "ATIS-NER", "BANKING77", "BANKING77-OOS", "CLINC-Single-Domain-OOS-banking",
"CLINC-Single-Domain-OOS-credit_cards", "CLINC150", "DSTC8-SGD", "HWU64", "MIT-Movie",
"MIT-Restaurant", "RESTAURANTS8K", "SNIPS", "SNIPS-NER", "TOP", "TOP-NER"
],
"task_oriented": [
"ABCD", "AirDialogue", "BiTOD", "CaSiNo", "CraigslistBargains",
"Disambiguation", "DSTC2-Clean", "FRAMES", "GECOR", "HDSA-Dialog",
"KETOD", "KVRET", "MetaLWOZ", "MS-DC", "MuDoCo",
"MulDoGO", "MultiWOZ_2.1", "MULTIWOZ2_2", "SGD", "SimJointGEN",
"SimJointMovie", "SimJointRestaurant", "STAR", "Taskmaster1", "Taskmaster2",
"Taskmaster3", "WOZ2_0"
],
"dialogue_summarization": [
"AMI", "CRD3", "DialogSum", "ECTSum", "ICSI",
"MediaSum", "QMSum", "SAMSum", "TweetSumm", "ConvoSumm",
"SummScreen_ForeverDreaming", "SummScreen_TVMegaSite"
],
"conversational_recommendation": [
"Redial", "DuRecDial-2.0", "OpenDialKG", "SalesBot",
],
"open_domain": [
"chitchat-dataset", "ConvAI2", "AntiScam", "Empathetic", "HH-RLHF",
"PLACES3.5", "Prosocial", "SODA"
],
"knowledge_grounded": [
"CompWebQ", "CoQA", "CoSQL", "DART", "FeTaQA",
"GrailQA", "HybridQA", "MTOP", "MultiModalQA", "SParC",
"Spider", "SQA", "ToTTo", "WebQSP", "WikiSQL",
"WikiTQ", "wizard_of_internet", "wizard_of_wikipedia"
],
}
class Test(object):
    """Loads DialogStudio dataset configs and reports the number of dialogues per split."""

    def __init__(self):
        pass

    def test_single_dataset(self, data_name):
        """Load a single config and return its train/validation/test sizes."""
        dataset = load_dataset("Salesforce/dialogstudio", data_name, revision="download")
        # Default each split to 0 so configs that lack a split still yield a complete row.
        dataset_size = {
            "train": 0,
            "validation": 0,
            "test": 0,
        }
        for split in dataset:
            dataset_size[split] = len(dataset[split])
        print(dataset_size)
        return dataset_size
    def test_all(self):
        """Check every config in DATASETS and write the split sizes to dataset_stats.csv."""
        with open("dataset_stats.csv", "w", newline="") as tf:
            writer = csv.writer(tf)
            writer.writerow(["Category", "Data_name", "train", "val", "test"])
            for cat, dataset_list in DATASETS.items():
                for data_name in dataset_list:
                    dataset_size = self.test_single_dataset(data_name=data_name)
                    writer.writerow([cat, data_name] + list(dataset_size.values()))


def main():
    test = Test()
    # test.test_all()
    # test.test_single_dataset("WOZ2_0")
    # test.test_single_dataset("MULTIWOZ2_2")
    # test.test_single_dataset("Taskmaster1")
    test.test_single_dataset("Taskmaster2")


if __name__ == "__main__":
    main()