"""
目标：跟据一篇故事的 actions.json 和 action_seq_rel.json ，生成指定的一个数据集 train/test/dev
划分策略：event 数量从 0 到 n，text 永远从 0 开始。
schema 中不需要指令
跟据标注数据准备 rel 和 event 混合的数据集
注意事项：
    # fix7(DONE): 验证数据集拆分的正确性
    # fix8: 增加负向数据，即没有 event 的数据
"""

import os
import json
import random
import copy
from nltk.tokenize import sent_tokenize

cur_dir = os.path.dirname(os.path.abspath(__file__))

### 数据格式
### data format
class DataTemplate:
    """One dataset record: schema (rel/event types), answers, text and background.

    The "ent" schema/answer fields are kept on the instance but intentionally
    omitted from the serialized form (they are commented out in the target
    format for this dataset).
    """

    def __init__(
            self,
            id,
            ent_list,
            rel_list,
            event_dict,
            ans_ent_list,
            ans_rel_list,
            ans_event_list,
            text,
            bg
        ):
        # record identity and payload
        self.id = id
        self.text = text
        self.bg = bg
        # schema definitions (entity / relation / event types)
        self.ent_list = ent_list
        self.rel_list = rel_list
        self.event_dict = event_dict
        # gold answers for this text
        self.ans_ent_list = ans_ent_list
        self.ans_rel_list = ans_rel_list
        self.ans_event_list = ans_event_list

    def assemble_data(self):
        """Return the dict that gets written to the .jsonl file (no "ent" keys)."""
        schema = {"rel": self.rel_list, "event": self.event_dict}
        answers = {"rel": self.ans_rel_list, "event": self.ans_event_list}
        return {
            "id": self.id,
            "schema": schema,
            "ans": answers,
            "text": self.text,
            "bg": self.bg,
        }

# region 预处理数据集的一系列方法
# 从 .txt 文件中读取故事文本
# read the story text from a .txt file
def get_story_from_txt(cur_dir: str, filename: str):
    """Read a story's full text (UTF-8) from `cur_dir/filename`.

    Returns the file content as one string, or None when the file cannot
    be read (the error is reported on stdout, matching the script's other
    best-effort loaders).
    """
    path = os.path.join(cur_dir, filename)
    try:
        with open(path, 'r', encoding='utf-8') as f:
            return f.read()
    except OSError:
        # narrow except: the previous bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; the message now names the file
        print(f"Error: {filename} for story not found in {cur_dir}")
        return None

# generate story sentence list of positions
# generate story sentence list of positions
def get_story_span_list(story_en):
    """Split the story into sentences and record each sentence's character span.

    Returns a list of {"span": [start, end], "sentence": str} dicts where
    story_en[start:end] == sentence. The previous version assumed exactly
    one space between consecutive sentences; locating each sentence with
    str.index makes the spans correct for any separator (newlines, double
    spaces) that nltk's tokenizer skipped over.
    """
    story_span_list = []
    search_from = 0
    for sentence in sent_tokenize(story_en):
        start_pos = story_en.index(sentence, search_from)
        end_pos = start_pos + len(sentence)
        story_span_list.append({"span": [start_pos, end_pos], "sentence": sentence})
        search_from = end_pos
    return story_span_list

# 生成一条数据的 id
# generate the id for one record
def gen_id(data_type: str, id_num: int):
    """Build a record id such as 'story.event.train.3' from split name and number."""
    return ".".join(["story", "event", data_type, str(id_num)])

# 生成一条数据的 bg
def gen_bg():
    """Generate the `bg` (background) field for one record.

    Placeholder — not implemented yet; implicitly returns None. Callers in
    this file currently pass bg="" directly instead of calling this.
    """
    pass

# 从文件中读取 json 内容
# read JSON content from a file
def get_json_from_file(dir_path: str, filename: str):
    """Load and parse `dir_path/filename` as JSON.

    Returns the parsed object, or None when the file is missing/unreadable
    or not valid JSON (the error is reported on stdout; the script treats
    all loads as best-effort).
    """
    path = os.path.join(dir_path, filename)
    try:
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)
    except (OSError, json.JSONDecodeError):
        # narrow except: the previous bare `except:` hid unrelated bugs;
        # the message now names the file that failed
        print(f"Error: {filename} not found or invalid in {dir_path}")
        return None

# 目标：划定动作的所有参数 text 的界限，划定动作触发词 trigger 的界限
# Goal: bound the text covering all event arguments, and bound the trigger words alone
def get_cover_span(temp_event_list: list, story_span_list: list):
    """Compute the character spans covering a list of events.

    Returns (text_start, text_end, action_start, action_end):
    - the action span covers only the trigger words of all events;
    - the text span covers triggers plus all arguments, then is widened to
      whole-sentence boundaries using story_span_list.

    Replaces the hand-rolled min/max loops with the builtins; the `default`
    values reproduce the original sentinel results (99999999, 0) for an
    empty event list.
    """
    trigger_starts = [event["trigger"]["span"][0] for event in temp_event_list]
    trigger_ends = [event["trigger"]["span"][1] for event in temp_event_list]
    arg_starts = [arg["span"][0] for event in temp_event_list for arg in event["args"]]
    arg_ends = [arg["span"][1] for event in temp_event_list for arg in event["args"]]

    # trigger-only coverage
    action_start_index = min(trigger_starts, default=99999999)
    action_end_index = max(trigger_ends, default=0)
    # trigger + argument coverage
    text_start_index = min(trigger_starts + arg_starts, default=99999999)
    text_end_index = max(trigger_ends + arg_ends, default=0)

    # widen the text span to sentence boundaries: snap the start to the
    # beginning of the sentence containing it and the end to the end of the
    # sentence containing it
    for story_span in story_span_list:
        if story_span['span'][0] <= text_start_index <= story_span['span'][1]:
            text_start_index = story_span['span'][0]
        if story_span['span'][0] <= text_end_index <= story_span['span'][1]:
            text_end_index = story_span['span'][1]
    return text_start_index, text_end_index, action_start_index, action_end_index

# 输入：动作关系列表
# 输出：动作关系列表中，start_pos 和 end_pos 之间的动作关系列表
# input: list of action relations
# output: the relations fully contained between start_pos and end_pos
def get_rel_list(action_seq_rel: list, start_pos: int, end_pos: int):
    """Keep only relations whose head AND tail spans both lie inside [start_pos, end_pos]."""
    def _inside(mention):
        # a mention is kept when its whole span sits within the window
        return start_pos <= mention["span"][0] and mention["span"][1] <= end_pos
    return [rel for rel in action_seq_rel if _inside(rel["head"]) and _inside(rel["tail"])]

# 打乱数据集
# shuffle the datasets
def shuffle_dataset(datasets: list):
    """Shuffle every dataset in `datasets` in place (order of the outer list is kept)."""
    for split in datasets:
        random.shuffle(split)

# 输入：actions.json、action_seq_rel.json
# 输出：ans_ent_list、ans_rel_list、ans_event_list、text
# input: actions.json, action_seq_rel.json
# output: ans_ent_list, ans_rel_list, ans_event_list, text
def gen_ans_ent_rel_event_text_from_json_file(cur_dir: str, story_name: str, story_en: str, story_span_list: list):
    """Build the full (de-duplicated, shuffled) record list for one story.

    Reads ../out/<story_name>/actions.json and action_seq_rel.json, then for
    every prefix of the action list produces:
      * one positive record: sentence-aligned text covering the events, with
        the relation/event answers;
      * one negative record (fix8): the same text with every trigger word cut
        out and empty answers.

    NOTE(review): the negative-sample slicing assumes `actions` is ordered by
    trigger position in the story — TODO confirm against the annotation files.
    """
    total_data = []
    # story-specific annotations
    actions = get_json_from_file(cur_dir, f"../out/{story_name}/actions.json")
    action_seq_rel = get_json_from_file(cur_dir, f"../out/{story_name}/action_seq_rel.json")
    # shared schema: load the three config files ONCE instead of re-reading
    # them on every loop iteration (was: 3 file reads per record)
    ent_list = get_json_from_file(cur_dir, "../config/ent_list.json")
    rel_list = get_json_from_file(cur_dir, "../config/rel_list.json")
    event_dict = get_json_from_file(cur_dir, "../config/event_dict.json")

    # positive records: event count grows from 1 to len(actions),
    # text always starts from the first covered sentence
    for event_num in range(len(actions)):
        ans_event_list = actions[:event_num + 1]
        text_start_pos, text_end_pos, action_start_pos, action_end_pos = get_cover_span(ans_event_list, story_span_list)
        ans_rel_list = get_rel_list(action_seq_rel, action_start_pos, action_end_pos)
        text = story_en[text_start_pos:text_end_pos]

        total_data.append(DataTemplate(
            id="",  # assigned after shuffling, by the caller
            ent_list=ent_list,
            rel_list=rel_list,
            event_dict=event_dict,
            ans_ent_list=[],
            ans_rel_list=ans_rel_list,
            ans_event_list=ans_event_list,
            text=text,
            bg=""
        ))

    # negative records: same prefixes, but with EVERY trigger word removed
    # from the text so the empty `ans_event_list` label is consistent.
    # (The previous version kept the last trigger in the text, and for a
    # single event dropped the whole tail after the trigger.)
    for neg_event_num in range(len(actions)):
        temp_event_list = actions[:neg_event_num + 1]
        text_start_pos, text_end_pos, _, _ = get_cover_span(temp_event_list, story_span_list)
        pieces = []
        prev_end = text_start_pos
        for event in temp_event_list:
            pieces.append(story_en[prev_end:event["trigger"]["span"][0]])
            prev_end = event["trigger"]["span"][1]
        pieces.append(story_en[prev_end:text_end_pos])
        neg_text = "".join(pieces)

        total_data.append(DataTemplate(
            id="",  # assigned after shuffling, by the caller
            ent_list=ent_list,
            rel_list=rel_list,
            event_dict=event_dict,
            ans_ent_list=[],
            ans_rel_list=[],
            ans_event_list=[],
            text=neg_text,
            bg=""
        ))

    # de-duplicate by serialized content. (The previous list(set(total_data))
    # was a no-op: DataTemplate defines no __eq__/__hash__, so sets compare
    # instances by identity and never collapse duplicates.)
    unique = {}
    for data in total_data:
        key = json.dumps(data.assemble_data(), ensure_ascii=False, sort_keys=True)
        unique.setdefault(key, data)
    total_data = list(unique.values())

    # shuffle before the caller assigns sequential ids
    shuffle_dataset([total_data])

    return total_data

# 验证数据集的正确性
# verify dataset consistency
def verify_dataset(dataset_list: list):
    """Check that every answer span in every record slices out exactly its stored text.

    Walks entity, relation (head/tail) and event (trigger + args) answers.
    Returns True when everything matches; on the first mismatch prints a
    diagnostic and returns False.
    """
    for dataset in dataset_list:
        for data in dataset:
            text = data.text
            # entity answers
            for ent in data.ans_ent_list:
                span_text = text[ent["span"][0]:ent["span"][1]]
                if span_text != ent["text"]:
                    print(f"id: {data.id} 的 ans_ent_list 验证失败，{span_text} != {ent['text']}")
                    return False
            # relation answers: both endpoints must match
            for rel in data.ans_rel_list:
                head, tail = rel["head"], rel["tail"]
                head_text = text[head["span"][0]:head["span"][1]]
                tail_text = text[tail["span"][0]:tail["span"][1]]
                if head_text != head["text"] or tail_text != tail["text"]:
                    print(f"id: {data.id} 的 ans_rel_list 验证失败，{head_text}:[{head['span'][0]},{head['span'][1]}) != {head['text']} 或 {tail_text}:[{tail['span'][0]},{tail['span'][1]}) != {tail['text']}")
                    return False
            # event answers: trigger first, then every argument
            for event in data.ans_event_list:
                trigger = event["trigger"]
                trigger_text = text[trigger["span"][0]:trigger["span"][1]]
                if trigger_text != trigger["text"]:
                    print(f"id: {data.id} 的 ans_event_list 中 trigger 验证失败，{trigger_text} != {trigger['text']}")
                    return False
                for arg in event["args"]:
                    arg_text = text[arg["span"][0]:arg["span"][1]]
                    if arg_text != arg["text"]:
                        print(f"id: {data.id} 的 ans_event_list 中 args 验证失败，{arg_text} != {arg['text']}")
                        return False
    return True

# 保存数据集到 jsonl 文件
# save the dataset to a .jsonl file
def save_to_jsonl(data_list, filename):
    """Write records to a JSON-Lines file (one `assemble_data()` dict per line).

    `filename` is resolved relative to this script's directory (module-level
    `cur_dir`). Any failure is reported on stdout rather than raised, to keep
    the script best-effort like its loaders.
    """
    def default(obj):
        # fallback serializer for non-JSON-native objects: dump their
        # attribute dict (used for DataTemplate-like values inside records)
        return obj.__dict__
    try:
        with open(os.path.join(cur_dir, filename), 'w', encoding='utf-8') as f:
            for data in data_list:
                # one JSON object per line
                json_line = json.dumps(data.assemble_data(), ensure_ascii=False, default=default) + '\n'
                f.write(json_line)
    except Exception as e:
        # boundary-level catch; the message now names the file that failed
        # instead of the garbled "(unknown)" placeholder
        print(f"Error occurred while saving data to {filename}: {e}")
# endregion 预处理数据集的一系列方法

# region initialization — load each story's raw English text and precompute
# per-sentence character spans (used by get_cover_span to widen event spans
# to sentence boundaries)
小红帽_story_en = get_story_from_txt(cur_dir, "../story/小红帽.txt")  # Little Red Riding Hood
小红帽_story_span_list = get_story_span_list(小红帽_story_en)
白雪公主_story_en = get_story_from_txt(cur_dir, "../story/白雪公主.txt")  # Snow White
白雪公主_story_span_list = get_story_span_list(白雪公主_story_en)
青蛙王子_story_en = get_story_from_txt(cur_dir, "../story/青蛙王子.txt")  # The Frog Prince
青蛙王子_story_span_list = get_story_span_list(青蛙王子_story_en)
# endregion initialization

# build the three splits: train ← 小红帽, test ← 白雪公主, dev ← 青蛙王子
train_dataset = gen_ans_ent_rel_event_text_from_json_file(cur_dir, '小红帽', 小红帽_story_en, 小红帽_story_span_list)
test_dataset = gen_ans_ent_rel_event_text_from_json_file(cur_dir, '白雪公主', 白雪公主_story_en, 白雪公主_story_span_list)
dev_dataset = gen_ans_ent_rel_event_text_from_json_file(cur_dir, '青蛙王子', 青蛙王子_story_en, 青蛙王子_story_span_list)

# assign sequential, 1-based record ids per split (after shuffling)
for split_name, split_data in (("train", train_dataset), ("test", test_dataset), ("dev", dev_dataset)):
    for seq, record in enumerate(split_data, start=1):
        record.id = gen_id(split_name, seq)

# sanity-check every span/text pair before writing anything to disk
if verify_dataset([train_dataset, test_dataset, dev_dataset]):
    print("数据集验证通过")
else:
    print("数据集验证失败")

# persist each split as JSON-Lines under ../data/
save_to_jsonl(train_dataset, "../data/train.jsonl")
save_to_jsonl(test_dataset, "../data/test.jsonl")
save_to_jsonl(dev_dataset, "../data/dev.jsonl")

# report split sizes
print(f"已保存 {len(train_dataset)} 条训练数据，{len(test_dataset)} 条测试数据，{len(dev_dataset)} 条验证数据")
