"""RiSAWOZ: A Large-Scale Multi-Domain Wizard-of-Oz Dataset with Rich Semantic Annotations for Task-Oriented Dialogue Modeling""" |
|
|
|
|
|
import json |
|
import os |
|
from typing import Dict |
|
|
|
import datasets |
|
|
|
|
|
_CITATION = """\ |
|
@inproceedings{quan-etal-2020-risawoz, |
|
title = "{R}i{SAWOZ}: A Large-Scale Multi-Domain {W}izard-of-{O}z Dataset with Rich Semantic Annotations for Task-Oriented Dialogue Modeling", |
|
author = "Quan, Jun and |
|
Zhang, Shian and |
|
Cao, Qian and |
|
Li, Zizhong and |
|
Xiong, Deyi", |
|
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
month = nov, |
|
year = "2020", |
|
address = "Online", |
|
publisher = "Association for Computational Linguistics", |
|
url = "https://www.aclweb.org/anthology/2020.emnlp-main.67", |
|
pages = "930--940", |
|
} |
|
""" |
|
|
|
|
|
_DESCRIPTION = """\ |
|
RiSAWOZ contains 11.2K human-to-human (H2H) multiturn semantically annotated dialogues, \ |
|
with more than 150K utterances spanning over 12 domains, \ |
|
which is larger than all previous annotated H2H conversational datasets.\ |
|
Both single- and multi-domain dialogues are constructed, accounting for 65% and 35%, respectively. |
|
""" |
|
|
|
_HOMEPAGE = "https://github.com/terryqj0107/RiSAWOZ" |
|
|
|
_LICENSE = "Attribution 4.0 International (CC BY 4.0) license." |
|
|
|
# Every "domain-slot" key that can appear in a belief state. `datasets.Features`
# requires a fixed schema, so _generate_examples() pads each turn with these
# keys, filling slots the turn does not mention with an empty string.
_EMPTY_BELIEF_STATE = [
    # 旅游景点 (attraction)
    "旅游景点-名称",
    "旅游景点-区域",
    "旅游景点-景点类型",
    "旅游景点-最适合人群",
    "旅游景点-消费",
    "旅游景点-是否地铁直达",
    "旅游景点-门票价格",
    "旅游景点-电话号码",
    "旅游景点-地址",
    "旅游景点-评分",
    "旅游景点-开放时间",
    "旅游景点-特点",
    # 餐厅 (restaurant)
    "餐厅-名称",
    "餐厅-区域",
    "餐厅-菜系",
    "餐厅-价位",
    "餐厅-是否地铁直达",
    "餐厅-人均消费",
    "餐厅-地址",
    "餐厅-电话号码",
    "餐厅-评分",
    "餐厅-营业时间",
    "餐厅-推荐菜",
    # 酒店 (hotel)
    "酒店-名称",
    "酒店-区域",
    "酒店-星级",
    "酒店-价位",
    "酒店-酒店类型",
    "酒店-房型",
    "酒店-停车场",
    "酒店-房费",
    "酒店-地址",
    "酒店-电话号码",
    "酒店-评分",
    # 电脑 (computer)
    "电脑-品牌",
    "电脑-产品类别",
    "电脑-分类",
    "电脑-内存容量",
    "电脑-屏幕尺寸",
    "电脑-CPU",
    "电脑-价格区间",
    "电脑-系列",
    "电脑-商品名称",
    "电脑-系统",
    "电脑-游戏性能",
    "电脑-CPU型号",
    "电脑-裸机重量",
    "电脑-显卡类别",
    "电脑-显卡型号",
    "电脑-特性",
    "电脑-色系",
    "电脑-待机时长",
    "电脑-硬盘容量",
    "电脑-价格",
    # 火车 (train)
    "火车-出发地",
    "火车-目的地",
    "火车-日期",
    "火车-车型",
    "火车-坐席",
    "火车-车次信息",
    "火车-时长",
    "火车-出发时间",
    "火车-到达时间",
    "火车-票价",
    # 飞机 (flight)
    "飞机-出发地",
    "飞机-目的地",
    "飞机-日期",
    "飞机-舱位档次",
    "飞机-航班信息",
    "飞机-起飞时间",
    "飞机-到达时间",
    "飞机-票价",
    "飞机-准点率",
    # 天气 (weather)
    "天气-城市",
    "天气-日期",
    "天气-天气",
    "天气-温度",
    "天气-风力风向",
    "天气-紫外线强度",
    # 电影 (movie)
    "电影-制片国家/地区",
    "电影-类型",
    "电影-年代",
    "电影-主演",
    "电影-导演",
    "电影-片名",
    "电影-主演名单",
    "电影-具体上映时间",
    "电影-片长",
    "电影-豆瓣评分",
    # 电视剧 (TV series)
    "电视剧-制片国家/地区",
    "电视剧-类型",
    "电视剧-年代",
    "电视剧-主演",
    "电视剧-导演",
    "电视剧-片名",
    "电视剧-主演名单",
    "电视剧-首播时间",
    "电视剧-集数",
    "电视剧-单集片长",
    "电视剧-豆瓣评分",
    # 辅导班 (tutoring class)
    "辅导班-班号",
    "辅导班-难度",
    "辅导班-科目",
    "辅导班-年级",
    "辅导班-区域",
    "辅导班-校区",
    "辅导班-上课方式",
    "辅导班-开始日期",
    "辅导班-结束日期",
    "辅导班-每周",
    "辅导班-上课时间",
    "辅导班-下课时间",
    "辅导班-时段",
    "辅导班-课次",
    "辅导班-课时",
    "辅导班-教室地点",
    "辅导班-教师",
    "辅导班-价格",
    "辅导班-课程网址",
    "辅导班-教师网址",
    # 汽车 (car)
    "汽车-名称",
    "汽车-车型",
    "汽车-级别",
    "汽车-座位数",
    "汽车-车身尺寸(mm)",
    "汽车-厂商",
    "汽车-能源类型",
    "汽车-发动机排量(L)",
    "汽车-发动机马力(Ps)",
    "汽车-驱动方式",
    "汽车-综合油耗(L/100km)",
    "汽车-环保标准",
    "汽车-驾驶辅助影像",
    "汽车-巡航系统",
    "汽车-价格(万元)",
    "汽车-车系",
    "汽车-动力水平",
    "汽车-油耗水平",
    "汽车-倒车影像",
    "汽车-定速巡航",
    "汽车-座椅加热",
    "汽车-座椅通风",
    "汽车-所属价格区间",
    # 医院 (hospital)
    "医院-名称",
    "医院-等级",
    "医院-类别",
    "医院-性质",
    "医院-区域",
    "医院-地址",
    "医院-电话",
    "医院-挂号时间",
    "医院-门诊时间",
    "医院-公交线路",
    "医院-地铁可达",
    "医院-地铁线路",
    "医院-重点科室",
    "医院-CT",
    "医院-3.0T MRI",
    "医院-DSA",
]
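
# Illustrative sanity check (not in the original script): every key follows
# the "<domain>-<slot>" naming convention used by the raw RiSAWOZ annotations.
assert all("-" in key for key in _EMPTY_BELIEF_STATE)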
|
|
class RiSAWOZ(datasets.GeneratorBasedBuilder):
    """RiSAWOZ: A Large-Scale Multi-Domain Wizard-of-Oz Dataset with Rich Semantic Annotations for Task-Oriented Dialogue Modeling"""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        features = datasets.Features(
            {
                "dialogue_id": datasets.Value("string"),
                "goal": datasets.Value("string"),
                "domains": datasets.Sequence(datasets.Value("string")),
                "dialogue": [
                    {
                        "turn_id": datasets.Value("int32"),
                        "turn_domain": datasets.Sequence(datasets.Value("string")),
                        "user_utterance": datasets.Value("string"),
                        "system_utterance": datasets.Value("string"),
                        "belief_state": {
                            # Cumulative dialogue state up to and including this turn.
                            "inform slot-values": {d: datasets.Value("string") for d in _EMPTY_BELIEF_STATE},
                            # Slots newly informed in this turn only.
                            "turn_inform": {d: datasets.Value("string") for d in _EMPTY_BELIEF_STATE},
                            # Slots the user requests in this turn; the key is
                            # spelled with a space in the source data.
                            "turn request": datasets.Sequence(datasets.Value("string")),
                        },
                        "user_actions": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                        "system_actions": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                        "db_results": datasets.Sequence(datasets.Value("string")),
                        "segmented_user_utterance": datasets.Value("string"),
                        "segmented_system_utterance": datasets.Value("string"),
                    }
                ],
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # The split files are expected to be distributed alongside this script.
        urls = {"train": "train.json", "test": "test.json", "dev": "dev.json"}
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_dir["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir["test"], "split": "test"},
            ),
            # The challenge split reads the same file as the test split; only
            # the `split` tag passed to _generate_examples differs.
            datasets.SplitGenerator(
                name="challenge",
                gen_kwargs={"filepath": data_dir["test"], "split": "challenge"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_dir["dev"], "split": "dev"},
            ),
        ]
|
    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            all_data = json.load(f)
        for id_, data in enumerate(all_data):
            # Pad every turn's belief state to the full slot schema: Arrow
            # requires identical keys across examples, but the raw JSON only
            # stores the slots that actually occur in a turn.
            for dia in data["dialogue"]:
                for slot in _EMPTY_BELIEF_STATE:
                    if slot not in dia["belief_state"]["inform slot-values"]:
                        dia["belief_state"]["inform slot-values"][slot] = ""
                    if slot not in dia["belief_state"]["turn_inform"]:
                        dia["belief_state"]["turn_inform"][slot] = ""
            yield id_, {
                "dialogue_id": data["dialogue_id"],
                "goal": data["goal"],
                "domains": data["domains"],
                "dialogue": data["dialogue"],
            }
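
# Usage sketch (an assumption, not part of the original script): with
# train.json, dev.json and test.json available next to this file, and a
# `datasets` version that still supports loading scripts, the builder can be
# exercised directly.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__)
    print(dataset)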
|
|