"""
拆数据ing：原故事文本的 ≤ 569 的位置的所有实体、关系、动作，作为验证集。原故事文本的 ≥3391 的位置的所有实体、关系、动作，作为测试集。（下标从 0 开始）
"""

import os
import json

# Split strategy: by default part 0 is the training set, part 1 the test
# set and part 2 the dev set.
strategy_offset = 1  # rotation offset; with 1: part 1 -> train, part 2 -> test, part 0 -> dev

# Character positions at which the story text is cut into three parts.
split_boundary_up = 570
split_boundary_down = 3391

cur_dir = os.path.dirname(os.path.abspath(__file__))

# Load the original story text.
story_path = os.path.join(cur_dir, 'story_en.txt')
with open(story_path, encoding='utf-8') as f:
    story_en = f.read()

# Load the original annotated dataset.
dataset_path = os.path.join(cur_dir, '../finetune_data_1.json')
with open(dataset_path, encoding='utf-8') as f:
    data = json.load(f)

# Record ids of the three resulting splits.
train_id = 'story.action.train.1'
test_id = 'story.action.test.1'
dev_id = 'story.action.dev.1'

class Factory:
    """Distributes annotated items across the train/test/dev splits.

    The target list is chosen by ``appendToList`` from the part index of
    the text segment an item belongs to, rotated by the module-level
    ``strategy_offset``.
    """

    def __init__(self, item_type):
        # Label for the kind of item this factory collects ("ent", "rel", "event").
        self.item_type = item_type
        self.train_list = []
        self.test_list = []
        self.dev_list = []

    def appendToList(self, index, item):
        """Append *item* to the split that part *index* maps to under strategy_offset."""
        buckets = (self.train_list, self.test_list, self.dev_list)
        buckets[(index - strategy_offset) % 3].append(item)

# One factory per annotation kind.
factory_for_ent = Factory("ent")
factory_for_rel = Factory("rel")
factory_for_action = Factory("event")

# Cut the story into its three parts, then rotate them by strategy_offset:
# part (strategy_offset % 3) becomes the training text, the next part the
# test text and the remaining one the dev text.
parts = [
    story_en[:split_boundary_up],
    story_en[split_boundary_up:split_boundary_down],
    story_en[split_boundary_down:],
]
rot = strategy_offset % 3
# Strip leading/trailing whitespace from each split's text.
train_story_en = parts[rot].strip()
test_story_en = parts[(rot + 1) % 3].strip()
dev_story_en = parts[(rot + 2) % 3].strip()
# Route every entity to a split based on where its character span falls
# relative to the two boundaries; spans are re-based to the split's text.
for ent in data['ans']['ent']:
    # Normalize the separator in the type label from '_' to '-'.
    ent['type'] = ent['type'].replace('_', '-')
    span = ent['span']
    if span[1] <= split_boundary_up:
        # Entirely inside [0:split_boundary_up] -> part 0.
        factory_for_ent.appendToList(0, ent)
    elif span[0] >= split_boundary_down:
        # Entirely inside [split_boundary_down:] -> part 2.
        span[0] -= split_boundary_down
        span[1] -= split_boundary_down
        factory_for_ent.appendToList(2, ent)
    else:
        # Middle part [split_boundary_up:split_boundary_down] -> part 1.
        # NOTE(review): the extra +1 presumably compensates for a leading
        # character removed by .strip() on this segment — confirm against
        # the data before changing it.
        span[0] -= split_boundary_up + 1
        span[1] -= split_boundary_up + 1
        factory_for_ent.appendToList(1, ent)
# Route every relation by the combined extent of its head and tail spans.
for rel in data['ans']['rel']:
    head_span = rel['head']['span']
    tail_span = rel['tail']['span']
    lo = min(head_span[0], tail_span[0])
    hi = max(head_span[1], tail_span[1])
    # Drop relations whose endpoints straddle either boundary.
    crosses_up = hi > split_boundary_up and lo <= split_boundary_up
    crosses_down = hi >= split_boundary_down and lo < split_boundary_down
    if crosses_up or crosses_down:
        continue
    if hi <= split_boundary_up:
        # Both endpoints inside [0:split_boundary_up] -> part 0.
        factory_for_rel.appendToList(0, rel)
    elif lo >= split_boundary_down:
        # Both endpoints inside [split_boundary_down:] -> part 2.
        for sp in (head_span, tail_span):
            sp[0] -= split_boundary_down
            sp[1] -= split_boundary_down
        factory_for_rel.appendToList(2, rel)
    else:
        # Middle part -> part 1.
        # NOTE(review): +1 mirrors the entity re-basing — confirm.
        shift = split_boundary_up + 1
        for sp in (head_span, tail_span):
            sp[0] -= shift
            sp[1] -= shift
        factory_for_rel.appendToList(1, rel)
# Route every action event by its trigger span; the trigger span and all
# argument spans are re-based to the split's text together.
for event in data['ans']['action']:
    # Lowercase every argument role.
    for arg in event['args']:
        arg['role'] = arg['role'].lower()

    # Prefix the event type with "action-".
    event['event_type'] = 'action-' + event['event_type']

    trigger_span = event['trigger']['span']
    if trigger_span[1] <= split_boundary_up:
        # Trigger inside [0:split_boundary_up] -> part 0.
        factory_for_action.appendToList(0, event)
    elif trigger_span[0] >= split_boundary_down:
        # Trigger inside [split_boundary_down:] -> part 2.
        for sp in [trigger_span] + [arg['span'] for arg in event['args']]:
            sp[0] -= split_boundary_down
            sp[1] -= split_boundary_down
        factory_for_action.appendToList(2, event)
    else:
        # Middle part -> part 1.
        # NOTE(review): +1 mirrors the entity re-basing — confirm.
        shift = split_boundary_up + 1
        for sp in [trigger_span] + [arg['span'] for arg in event['args']]:
            sp[0] -= shift
            sp[1] -= shift
        factory_for_action.appendToList(1, event)

# Instruction and schema shared by all three splits.
common_instruction = 'Extract information about the action event from the given text, including trigger words, action event types, subjects, objects, indirect objects, backgrounds, and locations. And if and only if the action event type is "reference", please extract its original name and another name.'

# The 19 action types, in schema order; they are also the tail of the
# "ent" label list below (after the five role-entity labels).
_ACTION_TYPES = [
    "single-abstract",
    "multi-abstract",
    "single-specific-situ-without-a-target",
    "single-specific-situ-with-a-target",
    "single-specific-move-without-a-target",
    "single-specific-move-with-a-target",
    "single-specific-appear",
    "single-specific-situ-leave",
    "single-specific-move-leave",
    "multi-specific-situ-no-touch",
    "multi-specific-situ-touch",
    "multi-specific-move-no-touch",
    "multi-specific-move-touch",
    "reference",
    "emotion",
    "prepare",
    "speak-content",
    "thought-content",
    "status",
]

# Every action type uses the default role set except "reference".
_DEFAULT_ROLES = ["subject", "object", "indirect-object", "background", "location"]
_REFERENCE_ROLES = ["original-name", "another-name"]

common_schema = {
    "ent": [
        "object",
        "indirect-object",
        "env",
        "original-name",
        "another-name",
    ] + list(_ACTION_TYPES),
    "rel": [
        "act:subject-of",
        "act:object-of",
        "act:indirect-object-of",
        "act:bkg-of",
        "act:loc-of",
        "act:original-name-of",
        "act:another-name-of",
        "before",
        "synchronization",
    ],
    # Each event key gets its own fresh role list (as in the hand-written form).
    "event": {
        "action-" + action_type: list(
            _REFERENCE_ROLES if action_type == "reference" else _DEFAULT_ROLES
        )
        for action_type in _ACTION_TYPES
    },
}

def _make_dataset(record_id, ents, rels, events, text):
    """Assemble one split's record (later serialized as a single jsonl line)."""
    return {
        "id": record_id,
        "instruction": common_instruction,
        "schema": common_schema,
        "ans": {
            "ent": ents,
            "rel": rels,
            "event": events,
        },
        "text": text,
        "bg": "",
    }


train_dataset = _make_dataset(
    train_id,
    factory_for_ent.train_list,
    factory_for_rel.train_list,
    factory_for_action.train_list,
    train_story_en,
)

test_dataset = _make_dataset(
    test_id,
    factory_for_ent.test_list,
    factory_for_rel.test_list,
    factory_for_action.test_list,
    test_story_en,
)

dev_dataset = _make_dataset(
    dev_id,
    factory_for_ent.dev_list,
    factory_for_rel.dev_list,
    factory_for_action.dev_list,
    dev_story_en,
)

with open(os.path.join(cur_dir, "../train.jsonl"), "w") as f:
    json.dump(train_dataset, f, ensure_ascii=False)

with open(os.path.join(cur_dir, "../test.jsonl"), "w") as f:
    json.dump(test_dataset, f, ensure_ascii=False)

with open(os.path.join(cur_dir, "../dev.jsonl"), "w") as f:
    json.dump(dev_dataset, f, ensure_ascii=False)

    
