### Read every ../out/{story_name}/actions.json and output all action types

import os
import json
from nltk.tokenize import sent_tokenize

cur_dir = os.path.dirname(os.path.abspath(__file__))

# region functions
# generate story sentence list of positions
def get_story_span_list(story_en):
    """Split *story_en* into sentences and return, for each sentence, its
    character span within the original text.

    Returns a list of ``{"span": [start, end), "sentence": str}`` dicts,
    ordered as the sentences appear in the text.
    """
    sentences = sent_tokenize(story_en)
    story_span_list = []
    search_pos = 0
    for sentence in sentences:
        # Locate each sentence in the original text instead of assuming a
        # single-space separator: sent_tokenize discards the actual
        # whitespace (newlines, double spaces), so "end + 1" drifts out of
        # alignment and corrupts every downstream span comparison.
        start_pos = story_en.find(sentence, search_pos)
        if start_pos == -1:
            # Fallback to the old assumption if the tokenizer altered the
            # sentence text and it cannot be found verbatim.
            start_pos = search_pos
        end_pos = start_pos + len(sentence)
        story_span_list.append({"span": [start_pos, end_pos], "sentence": sentence})
        search_pos = end_pos
    return story_span_list
# endregion

# Read each ../out/{story_name}/actions.json (only directories that actually
# contain an actions.json) and merge all action types into one set.

action_types = set()

for story_name in os.listdir(os.path.join(cur_dir, '../out')):
    actions_path = os.path.join(cur_dir, '../out', story_name, 'actions.json')
    if not os.path.exists(actions_path):
        continue
    # Load the story text. Explicit encoding: the platform's locale default
    # may not be UTF-8, which would mangle non-ASCII story text.
    with open(os.path.join(cur_dir, '../story', f"{story_name}.txt"), 'r', encoding='utf-8') as f:
        story_en = f.read()
    story_span_list = get_story_span_list(story_en)
    # Per-type count of actions in this story.
    action_counts = {}
    # Per-type list of actions, each paired with its containing sentence.
    action_contexts = {}
    with open(actions_path, 'r', encoding='utf-8') as f:
        actions = json.load(f)
    print(f"\n---------- ### \"{story_name}\" 共有 {len(actions)} 个动作类型 ### ----------\n")
    for action in actions:
        event_type = action['event_type']
        action_types.add(event_type)
        action_counts[event_type] = action_counts.get(event_type, 0) + 1

        # Find the sentence whose span fully contains the trigger span.
        # Reset per action: the original code reused the previous action's
        # span when no sentence matched (and raised NameError if the very
        # first action had no match), producing a wrong context silently.
        context_start = context_end = None
        trigger_span = action['trigger']['span']
        for story_span in story_span_list:
            if story_span['span'][0] <= trigger_span[0] and story_span['span'][1] >= trigger_span[1]:
                context_start, context_end = story_span['span']
                break
        context = story_en[context_start:context_end] if context_start is not None else ""
        action_contexts.setdefault(event_type, []).append({
            "action_text": action['trigger']['text'],
            "action_context": context,
        })

    for action_type, count in action_counts.items():
        print(f"动作类型: {action_type}, 数量: {count}")

    # Write this story's per-type action/context mapping next to its input.
    with open(os.path.join(cur_dir, '../out', story_name, 'action_contexts.json'), 'w', encoding='utf-8') as f:
        json.dump(action_contexts, f, ensure_ascii=False, indent=4)

action_types = sorted(action_types)

# Write the merged, sorted list of action types to a single file.
with open(os.path.join(cur_dir, '../out/action_types.json'), 'w', encoding='utf-8') as f:
    json.dump(action_types, f, ensure_ascii=False, indent=4)
print('\n动作类型已输出到 ../out/action_types.json')
print(f"\n---------- ### 共 {len(action_types)} 个动作类型 ### ----------")