# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RealPersonaChat: A Realistic Persona Chat Corpus with Interlocutors' Own Personalities

This script is based on
https://github.com/huggingface/datasets/blob/d69d1c654c4645a0474731794a20d4c012d2d214/templates/new_dataset_script.py
"""

import json
from pathlib import Path

import datasets


_CITATION = """\
@inproceedings{yamashita-etal-2023-realpersonachat,
    title = "{R}eal{P}ersona{C}hat: A Realistic Persona Chat Corpus with Interlocutors{'} Own Personalities",
    author = "Yamashita, Sanae and Inoue, Koji and Guo, Ao and Mochizuki, Shota and Kawahara, Tatsuya and Higashinaka, Ryuichiro",
    booktitle = "Proceedings of the 37th Pacific Asia Conference on Language, Information and Computation",
    year = "2023",
    pages = "852--861"
}
@inproceedings{yamashita-etal-2024-realpersonachat-ja,
    title = "{R}eal{P}ersona{C}hat: 話者本人のペルソナと性格特性を含んだ雑談対話コーパス",
    author = "山下 紗苗 and 井上 昂治 and 郭 傲 and 望月 翔太 and 河原 達也 and 東中 竜一郎",
    booktitle = "言語処理学会第30回年次大会発表論文集",
    year = "2024",
    pages = "2738--2743"
}
"""

_DESCRIPTION = """\
RealPersonaChat: A Realistic Persona Chat Corpus with Interlocutors' Own Personalities
"""

_HOMEPAGE = "https://github.com/nu-dialogue/real-persona-chat/"

_LICENSE = "CC BY-ND 4.0"

_VERSION = "1.0.0"

_URL = f"https://github.com/nu-dialogue/real-persona-chat/archive/refs/tags/v{_VERSION}.zip"


class RealPersonaChat(datasets.GeneratorBasedBuilder):
    """RealPersonaChat consists of dialogues and interlocutor information."""

    VERSION = datasets.Version(_VERSION)

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="dialogue", version=VERSION, description="This part contains dialogues"
        ),
        datasets.BuilderConfig(
            name="interlocutor", version=VERSION, description="This part contains interlocutor information"
        ),
    ]

    DEFAULT_CONFIG_NAME = "dialogue"

    def _info(self):
        if self.config.name == "dialogue":
            features = datasets.Features(
                {
                    "dialogue_id": datasets.Value("int32"),
                    "interlocutors": datasets.Sequence(datasets.Value("string"), length=2),
                    "utterances": datasets.Sequence(
                        {
                            "utterance_id": datasets.Value("int32"),
                            "interlocutor_id": datasets.Value("string"),
                            "text": datasets.Value("string"),
                            "timestamp": datasets.Value("timestamp[us]"),
                        }
                    ),
                    "evaluations": datasets.Sequence(
                        {
                            "interlocutor_id": datasets.Value("string"),
                            "informativeness": datasets.Value("int32"),
                            "comprehension": datasets.Value("int32"),
                            "familiarity": datasets.Value("int32"),
                            "interest": datasets.Value("int32"),
                            "proactiveness": datasets.Value("int32"),
                            "satisfaction": datasets.Value("int32"),
                        }
                    ),
                }
            )
        elif self.config.name == "interlocutor":
            features = datasets.Features(
                {
                    "interlocutor_id": datasets.Value("string"),
                    "persona": datasets.Sequence(datasets.Value("string"), length=10),
                    "personality": {
                        "BigFive_Openness": datasets.Value("float32"),
                        "BigFive_Conscientiousness": datasets.Value("float32"),
                        "BigFive_Extraversion": datasets.Value("float32"),
                        "BigFive_Agreeableness": datasets.Value("float32"),
                        "BigFive_Neuroticism": datasets.Value("float32"),
                        "KiSS18_BasicSkill": datasets.Value("float32"),
                        "KiSS18_AdvancedSkill": datasets.Value("float32"),
                        "KiSS18_EmotionalManagementSkill": datasets.Value("float32"),
                        "KiSS18_OffenceManagementSkill": datasets.Value("float32"),
                        "KiSS18_StressManagementSkill": datasets.Value("float32"),
                        "KiSS18_PlanningSkill": datasets.Value("float32"),
                        "IOS": datasets.Value("int32"),
                        "ATQ_Fear": datasets.Value("float32"),
                        "ATQ_Frustration": datasets.Value("float32"),
                        "ATQ_Sadness": datasets.Value("float32"),
                        "ATQ_Discomfort": datasets.Value("float32"),
                        "ATQ_ActivationControl": datasets.Value("float32"),
                        "ATQ_AttentionalControl": datasets.Value("float32"),
                        "ATQ_InhibitoryControl": datasets.Value("float32"),
                        "ATQ_Sociability": datasets.Value("float32"),
                        "ATQ_HighIntensityPleasure": datasets.Value("float32"),
                        "ATQ_PositiveAffect": datasets.Value("float32"),
                        "ATQ_NeutralPerceptualSensitivity": datasets.Value("float32"),
                        "ATQ_AffectivePerceptualSensitivity": datasets.Value("float32"),
                        "ATQ_AssociativeSensitivity": datasets.Value("float32"),
                        "SMS_Extraversion": datasets.Value("float32"),
                        "SMS_OtherDirectedness": datasets.Value("float32"),
                        "SMS_Acting": datasets.Value("float32"),
                    },
                    "demographic_information": {
                        "gender": datasets.ClassLabel(names=["Male", "Female", "Other"]),
                        "age": datasets.ClassLabel(names=["-19", "20-29", "30-39", "40-49", "50-59", "60-69"]),
                        "education": datasets.ClassLabel(
                            names=["High school graduate", "Two-year college", "Four-year college", "Postgraduate", "Other"]
                        ),
                        "employment_status": datasets.ClassLabel(
                            names=["Employed", "Homemaker", "Student", "Retired", "Unable to work", "None"]
                        ),
                        "region_of_residence": datasets.ClassLabel(
                            names=[
                                "Hokkaido", "Aomori", "Iwate", "Miyagi", "Akita", "Yamagata", "Fukushima",
                                "Ibaraki", "Tochigi", "Gunma", "Saitama", "Chiba", "Tokyo", "Kanagawa",
                                "Niigata", "Toyama", "Ishikawa", "Fukui", "Yamanashi", "Nagano", "Gifu",
                                "Shizuoka", "Aichi", "Mie", "Shiga", "Kyoto", "Osaka", "Hyogo", "Nara",
                                "Wakayama", "Tottori", "Shimane", "Okayama", "Hiroshima", "Yamaguchi",
                                "Tokushima", "Kagawa", "Ehime", "Kochi", "Fukuoka", "Saga", "Nagasaki",
                                "Kumamoto", "Oita", "Miyazaki", "Kagoshima", "Okinawa",
                            ]
                        ),
                    },
                    "text_chat_experience": {
                        "age_of_first_chat": datasets.ClassLabel(names=["-9", "10-19", "20-29", "30-39", "40-49", "50-59"]),
                        "frequency": datasets.ClassLabel(
                            names=["Every day", "Once every few days", "Once a week", "Less frequent than these"]
                        ),
                        "chatting_partners": datasets.Sequence(
                            datasets.ClassLabel(names=["Family", "Friend", "Colleague", "Other"])
                        ),
                        "typical_chat_content": datasets.Value("string"),
                    },
                }
            )
        else:
            raise ValueError(f"Config name `{self.config.name}` is invalid.")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URL)
        if self.config.name == "dialogue":
            # One JSON file per dialogue; sort numerically by file stem (the dialogue id).
            filepath_list = Path(data_dir, f"real-persona-chat-{_VERSION}", "real_persona_chat", "dialogues").glob("*.json")
            filepath_list = list(sorted(filepath_list, key=lambda x: int(x.stem)))
        elif self.config.name == "interlocutor":
            # A single JSON file holding all interlocutors.
            filepath_list = Path(data_dir, f"real-persona-chat-{_VERSION}", "real_persona_chat").glob("interlocutors.json")
        else:
            raise ValueError(f"Config name `{self.config.name}` is invalid.")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath_list": filepath_list},
            )
        ]

    def _generate_examples(self, filepath_list):
        if self.config.name == "dialogue":
            for filepath in filepath_list:
                key = filepath.stem
                with open(filepath, encoding="utf-8") as f:
                    example = json.load(f)
                for utterance_id in range(len(example["utterances"])):
                    timestamp = example["utterances"][utterance_id]["timestamp"]
                    if timestamp == "NaT":
                        # Missing timestamps are stored as "NaT"; replace them with a sentinel
                        # so the value can still be parsed as timestamp[us].
                        example["utterances"][utterance_id]["timestamp"] = "0001-01-01T00:00:00.000000"
                yield key, example
        elif self.config.name == "interlocutor":
            for filepath in filepath_list:
                with open(filepath, encoding="utf-8") as f:
                    interlocutors = json.load(f)
                for key, example in interlocutors.items():
                    yield key, example
        else:
            raise ValueError(f"Config name `{self.config.name}` is invalid.")
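

# Usage sketch (not part of the loading script itself): the two configs defined above
# can be loaded with `datasets.load_dataset`, either by pointing at this script file
# directly or at a dataset repository that hosts it. The repository id
# "nu-dialogue/real-persona-chat" below is an illustrative assumption; substitute the
# actual path or repo where this script lives.
#
#     from datasets import load_dataset
#
#     # Load from a local copy of this script:
#     dialogues = load_dataset("path/to/real_persona_chat.py", "dialogue", split="train")
#
#     # Or, assuming the script is hosted on the Hub under this (hypothetical) repo id:
#     interlocutors = load_dataset("nu-dialogue/real-persona-chat", "interlocutor", split="train")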