import copy
import datetime
import glob
import json
import os
import re
import sys

from collections import OrderedDict, deque
import datasets
from os.path import realpath, dirname

root_path = '/'.join(dirname(realpath(__file__)).split('/')[:-2])
sys.path.insert(1, root_path)

from src.all_prompts import *
from src.annotate.utils import extract_explanation, NO_REPEAT_ACTIONS
from src.models.baichuan2 import BaichuanTokenizer, BaichuanConfig
from src.models.qwen import QWenTokenizer, QWenConfig

# Dict keys (Chinese) used in packaged action/response items:
# content / action / action explanation / action object.
CONTENT, ACTION, EXPLAIN, ACTION_OBJECT = '内容', '动作', '动作解释', '动作对象'

# State-category labels: event, emotion, behavior/cognition,
# real-world resources, expectations, solution-focused techniques.
EVENT, EMOTION, BEHAVIOR_RECOGNITION, RESOURCES, EXPECT, SHORT_FOCUS = \
    '事件', '情绪', '行为认知', '现实资源', '期望期待', '短焦技巧'

# Feature switches:
# USE_ALL: keep unedited turns as well, not only manually modified ones.
USE_ALL = False
# USE_HISTORY_ACTIONS: include the list of previously used actions in inputs.
USE_HISTORY_ACTIONS = True
# NOTE(review): 'EXPALIN' is a typo for 'EXPLAIN'; kept as-is for compatibility.
USE_EXPALIN = False

# Output-dict keys: current-turn states / action-response list.
CURRENT_TURN_STATES = "当前轮状态"
ACTION_RESPONSE = "动作回复"

# Maps deprecated or over-specific action names to canonical replacements.
SUBSTITUTE_DICT = {
    '总结或重复': '总结',
    '共情/同理': '反馈',
    '澄清': '追问模糊信息',
    '提问那我们可以一起探索一下。可以和我说说你最近的生活吗？': '提问来访者最近的生活情况'
}


def create_global_state_dict():
    """Return a fresh mapping from each state category to an empty list."""
    categories = (
        EVENT,                  # event
        EMOTION,                # emotion
        BEHAVIOR_RECOGNITION,   # behavior / cognition
        RESOURCES,              # real-world resources
        EXPECT,                 # expectations
        SHORT_FOCUS,            # solution-focused techniques
    )
    return {category: [] for category in categories}


def cut_len(text, max_len=1000):
    """Truncate *text* to at most ``max_len`` characters, keeping the tail.

    The text is split on runs of newlines and trailing segments are kept
    whole, so truncation happens at a line boundary whenever possible
    (useful for dialogue histories where each utterance ends in a newline).

    Args:
        text: The string to truncate.
        max_len: Maximum number of characters to keep.

    Returns:
        *text* unchanged if it already fits; otherwise its tail trimmed to
        a newline boundary. If even the last segment alone exceeds
        ``max_len``, the last ``max_len`` characters are returned — the
        previous implementation returned '' in that case, silently
        discarding the entire history.
    """
    if len(text) <= max_len:
        return text

    # Capturing group keeps the newline separators as their own segments.
    segments = re.split(r'(\n+)', text)
    kept = ''
    for segment in reversed(segments):
        if len(kept) + len(segment) > max_len:
            break
        kept = segment + kept

    # Fallback for a single oversized trailing segment (bug fix).
    return kept if kept else text[-max_len:]


def update_global_states(global_states_dict, current_states):
    """File each line of *current_states* under its matching category.

    The states string is split on runs of newlines; a line is appended to
    the bucket of the first category label it starts with, and lines that
    match no label are ignored. Mutates *global_states_dict* in place.
    """
    labels = (EVENT, EMOTION, BEHAVIOR_RECOGNITION, RESOURCES, EXPECT, SHORT_FOCUS)
    label_pattern = re.compile('({})'.format('|'.join(labels)))

    for line in re.split('\n+', current_states):
        matched = label_pattern.match(line)
        if matched:
            global_states_dict[matched.group()].append(line)


def convert_global_states_dict_to_str(global_states_dict):
    """Render the accumulated state dict as a printable string.

    Each non-empty category becomes a newline-joined paragraph, and the
    paragraphs are separated by blank lines. Returns '' when every
    bucket is empty.
    """
    paragraphs = [
        '\n'.join(entries)
        for entries in global_states_dict.values()
        if entries
    ]
    return '\n\n'.join(paragraphs) if paragraphs else ''


def preprocess_data():
    """Load annotated dialogue JSON files and build per-turn training items.

    Walks every session file under the hard-coded raw-data directory,
    selects annotated turns (all turns or only edited ones, depending on
    ``USE_ALL``), normalizes action names via ``SUBSTITUTE_DICT``, and
    accumulates the running dialogue history and global state summary.
    Annotation problems (empty action names, object names, or contents)
    are collected and exported to 'error_data.xlsx' as a side effect.

    Returns:
        List of dicts, one per kept turn, with keys: history,
        history_actions, role, global_states, current_states, actions,
        outputs.
    """
    all_data = []
    error_data = []  # rows describing annotation problems, exported to Excel
    num = 0  # NOTE(review): unused variable
    re_files = '/workspace/psycho_trainning/data/raw_data/action_response_data/*.json'
    file_paths = glob.glob(re_files)

    all_actions_set = set()  # all distinct (normalized) action names seen
    for file in file_paths:
        # The second-to-last path component encodes the annotation date.
        file_date = file.split('/')[-2]

        corpus = json.load(open(file, 'r', encoding='utf-8'))
        try:
            dialog = corpus['dialog']
        except:
            # NOTE(review): bare except only prints a blank line; when
            # 'dialog' is missing, `dialog` silently keeps its value from
            # the previous file iteration — looks like a bug, confirm.
            print()
        global_state_dict = create_global_state_dict()
        history = ''
        history_actions = []
        # Sliding window holding the action tuples of the last 5 turns.
        # NOTE(review): `queue` is appended to but never read afterwards.
        queue = deque(maxlen=5)

        for turn in dialog:
            session_id = turn['session_id']
            turn_id = turn['turn_id']
            user_content = turn['user_content']
            bot_content = turn['bot_response']

            # The client's utterance enters the history before skip checks,
            # so skipped turns still contribute their client line.
            history += f'来访者：{user_content}\n'
            states = turn['states']

            if USE_ALL:
                # Keep everything except explicitly skipped, unedited turns;
                # prefer the re-annotated fields when present.
                if turn['edit'] == False and 'skip' in turn and turn['skip'] == 1:
                    continue
                actions = turn['new_action'] if 'new_action' in turn else turn['actions']
                model_output = turn['new_outputs'] if 'new_outputs' in turn else turn['model_output']
            else:
                # Keep only turns that were manually edited or marked skip==2.
                # NOTE(review): unlike the USE_ALL branch, 'skip' presence is
                # not checked here — a missing key would raise KeyError.
                if turn['edit'] == True or (turn['edit'] == False and turn['skip'] == 2):
                    actions = turn['new_action']
                    model_output = turn['new_outputs']

                else:
                    continue

            new_actions = []
            tuple_action = ()

            for a in actions:
                action_item = {}
                # Record annotation errors for empty action names.
                if a['action_name'] == '':
                    error_dict = {
                        'session_id': session_id,
                        'turn_id': turn_id,
                        'usr_content': user_content,
                        'bot_response': bot_content,
                        'file_date': file_date,
                        'error_position': 'action'
                    }
                    error_data.append(error_dict)

                # Normalize deprecated action names to their canonical form.
                action_name = a['action_name']
                if action_name in SUBSTITUTE_DICT.keys():
                    print('old_name', action_name)
                    action_name = SUBSTITUTE_DICT[action_name]
                    print('new_name', action_name)
                tuple_action += (action_name,)
                all_actions_set.add(action_name)
                action_item['动作'] = action_name
                action_object_list = a['action_object']
                new_action_object_list = []

                for ao in action_object_list:
                    ao_item = {}
                    ao_item['对象名称'] = ao['object_name']
                    ao_item['对象概括'] = ao['object_summary']
                    new_action_object_list.append(ao_item)

                    # NOTE(review): this '1[]' placeholder normalization runs
                    # AFTER ao_item was built and appended, so '1[]' still
                    # reaches the output; only the error report below sees
                    # the cleared value — confirm the intended ordering.
                    if ao['object_name'] == '1[]':
                        ao['object_name'] = ""

                    if ao['object_name'] == "":
                        error_dict = {
                            'session_id': session_id,
                            'turn_id': turn_id,
                            'usr_content': user_content,
                            'bot_response': bot_content,
                            'file_date': file_date,
                            'error_position': 'object_name'
                        }
                        error_data.append(error_dict)

                # NOTE(review): explanation is derived from the RAW action
                # name, not the SUBSTITUTE_DICT-normalized one — confirm.
                action_item['动作解释'] = extract_explanation(a['action_name'])
                action_item['动作对象'] = new_action_object_list
                if a['solution_method'] != '':
                    action_item['解决方式'] = a['solution_method']

                new_actions.append(action_item)

            # Push this turn's action tuple into the 5-turn window.
            queue.append(tuple_action)
            # print(queue)
            new_outputs = []
            for o in model_output:
                o_item = {}
                # Record annotation errors for empty response contents.
                if o['content'] == '':
                    error_dict = {
                        'session_id': session_id,
                        'turn_id': turn_id,
                        'usr_content': user_content,
                        'bot_response': bot_content,
                        'file_date': file_date,
                        'error_position': 'content'
                    }
                    error_data.append(error_dict)
                o_item['内容'] = o['content'].replace(' ', '')
                o_item['动作'] = o['action']

                new_outputs.append(o_item)

            # Snapshot the global states BEFORE merging this turn's states,
            # so the model input reflects only previous turns.
            global_state_str = convert_global_states_dict_to_str(global_state_dict)
            update_global_states(global_state_dict, turn['states'])

            data_item = {
                'history': copy.deepcopy(history),
                'history_actions': copy.deepcopy(history_actions),
                'role': '咨询师',
                'global_states': global_state_str,
                'current_states': turn['states'],
                'actions': copy.deepcopy(new_actions),
                'outputs': copy.deepcopy(new_outputs)
            }

            # Track non-repeatable actions so later turns know they were used.
            for action in new_actions:
                action_name = action['动作']
                if action_name not in history_actions and action_name in NO_REPEAT_ACTIONS:
                    history_actions.append(action_name)
                    # print(history_actions)

            all_data.append(data_item)
            # The counselor's reply enters the history only for kept turns.
            history += f'咨询师：{bot_content}\n'

    # Export any annotation problems for manual review.
    if error_data:
        import pandas as pd
        pd.DataFrame(error_data).to_excel('error_data.xlsx')
    return all_data


def package_data(all_data):
    """Convert preprocessed turn dicts into prompt/target training pairs.

    For each item, the dialogue history and global-states summary are
    length-capped with ``cut_len``, formatted into the matching prompt
    template, and paired with an output dict holding the current-turn
    states plus the ordered action/response list.

    Args:
        all_data: List of dicts produced by ``preprocess_data``.

    Returns:
        List of dicts with 'input', 'output' and (when
        ``USE_HISTORY_ACTIONS``) 'history_actions' keys.
    """
    package_datas = []

    for data in all_data:
        # Fix: this local was previously named `input`, shadowing the builtin.
        model_input = {
            'history': cut_len(data['history']),
            'role': data['role'],
            'prev_global_states': cut_len(data['global_states']),
        }
        if USE_HISTORY_ACTIONS:
            model_input['history_actions'] = data['history_actions']

        # Pair each action annotation with its corresponding response.
        new_output_dict_list = []
        for action_dict, response_dict in zip(data['actions'], data['outputs']):
            item = OrderedDict()
            item[ACTION] = action_dict[ACTION]
            if USE_EXPALIN:
                item[EXPLAIN] = action_dict[EXPLAIN]
            item[ACTION_OBJECT] = action_dict[ACTION_OBJECT]
            item[CONTENT] = response_dict[CONTENT]
            new_output_dict_list.append(item)

        output = {
            CURRENT_TURN_STATES: data['current_states'],
            ACTION_RESPONSE: new_output_dict_list,
        }

        # The templates access the dict positionally, e.g. {0[history]}.
        if USE_HISTORY_ACTIONS:
            data_item = {
                'input': DATA_COACH_WITH_GLOBAL_STATES_WITH_HISTORY_ACTIONS.format(model_input),
                'history_actions': data['history_actions'],
                'output': output,
            }
        else:
            data_item = {
                'input': DATA_COACH_WITH_GLOBAL_STATES.format(model_input),
                'output': output,
            }
        package_datas.append(data_item)

    return package_datas


def tokenize_corpus(all_package_data, tokenizer, config):
    """Tokenize packaged examples into a ``datasets.Dataset``.

    Each example's 'input' is encoded as the prompt and its 'output'
    (JSON-serialized when not already a string) as the target. The two
    token sequences are concatenated, and ``context_len`` marks where the
    target portion begins.

    Args:
        all_package_data: List of dicts with 'input' and 'output' keys.
        tokenizer: Tokenizer exposing ``encode``.
        config: Model config exposing ``eos_token_id``.

    Returns:
        A ``datasets.Dataset`` with 'input_ids' and 'context_len' columns.
    """
    def token_example(example):
        """Encode one example into concatenated prompt/target token ids."""
        prompt, target = example.get('input', ''), example.get('output', '')
        if not isinstance(target, str):
            target = json.dumps(target, ensure_ascii=False)

        # Prompt keeps no trailing EOS so generation flows into the target.
        # Bug fix: guard against empty encodings before indexing [-1].
        x = tokenizer.encode(prompt, add_special_tokens=True)
        if x and x[-1] == config.eos_token_id:
            x = x[:-1]

        # Target always ends with EOS: [t1, t2, ...] <eos>.
        y = tokenizer.encode(target, add_special_tokens=False)
        if not y or y[-1] != config.eos_token_id:
            y = y + [config.eos_token_id]

        return {"input_ids": x + y, "context_len": len(x)}

    def generate_features():
        for example in all_package_data:
            yield token_example(example)

    return datasets.Dataset.from_generator(generate_features)


if __name__ == "__main__":
    # Build the per-turn training items from the raw annotation files.
    all_data = preprocess_data()
    print(f'total data num is: {len(all_data)}')

    # Package the items into prompt/target pairs.
    all_package_data = package_data(all_data)

    date = datetime.datetime.now().strftime("%m%d")
    model_name = 'qwen'

    # Load the tokenizer and config matching the chosen base model.
    if model_name == 'baichuan':
        pretrained_model_path = '/workspace/psycho/resources/Baichuan2-13B-chat'
        tokenizer = BaichuanTokenizer.from_pretrained(pretrained_model_path)
        config = BaichuanConfig.from_pretrained(pretrained_model_path)
    else:
        pretrained_model_path = '/workspace/psycho_trainning/resources/Qwen-14B-Chat'
        tokenizer = QWenTokenizer.from_pretrained(pretrained_model_path)
        config = QWenConfig.from_pretrained(pretrained_model_path)

    # Dump the human-readable packaged data as JSON.
    output_dir = '/workspace/psycho/data/package_data'
    os.makedirs(output_dir, exist_ok=True)
    json_name = f'train_all_data_{date}.json' if USE_ALL else f'train_modified_{date}.json'
    output_path = os.path.join(output_dir, json_name)
    json.dump(all_package_data, open(output_path, 'w', encoding='utf-8'), ensure_ascii=False, indent=4)

    # Tokenize and persist the dataset for training.
    dataset = tokenize_corpus(all_package_data, tokenizer, config)
    output_dir = '/workspace/psycho/data/tokenized_data'
    os.makedirs(output_dir, exist_ok=True)

    if USE_ALL:
        dataset_name = f'train_data_all_{model_name}_{date}'
    else:
        dataset_name = f'train_data_only_modified_{model_name}_{date}'
    dataset.save_to_disk(os.path.join(output_dir, dataset_name))
