import os, pdb
import requests
import json
import copy
from typing import List, Tuple
from config import Config
from collections import *
# Shared module-level configuration instance (project-defined Config);
# read by make_context/coach_response/coach_score_response below.
config = Config()

def parse_result(result):  # tally satisfaction verdicts
    """Turn the judge model's free-text verdict into a 0-100 satisfaction score.

    Each line of *result* is truncated at the first '依据' ("evidence") marker,
    then scanned for the first matching satisfaction keyword. The matched
    keywords' weights are averaged and scaled to 0-100.

    The keyword order matters: longer/negated keys come before their
    substrings so that e.g. "不满意" (unsatisfied) is not mis-matched as
    "满意" (satisfied).

    Args:
        result: multi-line verdict text produced by the scoring model.

    Returns:
        float score in [0, 100]; 0.0 when no keyword is found at all
        (previously this raised ZeroDivisionError).
    """
    score_dict = OrderedDict({"非常满意":1, "不满意":0.21, "没有完成":0.15, "一般":0.66, "满意":0.85})
    obj_num = 0
    score = 0.0
    for line in result.split('\n'):
        line = line.strip().split('依据', 1)[0]
        for key, weight in score_dict.items():
            if key in line:
                obj_num += 1
                score += weight
                break
    if obj_num == 0:
        # No recognizable verdict on any line — report zero instead of
        # dividing by zero.
        return 0.0
    return score / obj_num * 100

def prompt_for_chat_model_score(tokenizer, talk1, talk2):
    """Render a user/assistant exchange through the tokenizer's chat template.

    When ``talk2`` is falsy (e.g. an empty string) only the user turn is
    included. Returns the untokenized prompt text with the generation
    prompt appended.
    """
    messages = [{"role": "user", "content": talk1}]
    if talk2:
        messages.append({"role": "assistant", "content": talk2})
    return tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )

def make_context(
    tokenizer,
    query: str,
    history: List[Tuple[str, str]] = None,
    system: str = "",
    max_window_size: int = 6144
):
    """Assemble the raw ChatML-style prompt text for one coaching turn.

    Returns (raw_text, current_text): the complete prompt to send to the
    model, and the current user turn alone (the caller stores the latter
    in history).

    NOTE(review): the ``tokenizer``, ``system`` and ``max_window_size``
    parameters are accepted but never used — ``config.system`` is used
    instead of ``system`` — confirm whether that is intentional.
    """
    global config
    im_start, im_end = config.im_start, config.im_end 

    if history is None:
        history = []
    system_text = config.system
    raw_text = ""
    # Walk history newest-first, prepending each turn so the final string
    # ends up in chronological order.
    for turn_query, turn_response in reversed(history):
        # NOTE(review): the stored query is deliberately blanked here, so only
        # past responses are replayed into the prompt (each as an "assistant"
        # turn) — confirm this matches the intended memory format.
        turn_query = ''
        query_text = turn_query
        response_text = turn_response
        prev_chat = (
            f"\n{im_start} assistant\n{query_text.strip()}\n{response_text} {im_end}"
        )
        raw_text = prev_chat + raw_text
    # System preamble always goes first.
    raw_text = f" {im_start}system\n{system_text} {im_end}" + raw_text
    # After config.max_turns turns, switch to the conversation-ending template.
    if len(history) < config.max_turns:
        current_text = f"\n{im_start}user\n{config.template.format(query)}\n{im_end}\n{im_start}assistant"
    else:
        current_text = f"\n{im_start}user\n{config.template_ending.format(query)}\n{im_end}\n{im_start}assistant"

    raw_text += current_text

    return raw_text, current_text


# URL of the model inference endpoint; must be set via set_url() before
# generate_response() is called.
model_url = ""

def prompt_for_chat_model(tokenizer, system_content, prompt):
    """Build an untokenized chat prompt: a system message followed by one
    user turn, rendered via the tokenizer's chat template with the
    generation prompt appended."""
    chat = [
        {"role": "system", "content": system_content},
        {"role": "user", "content": prompt},
    ]
    rendered = tokenizer.apply_chat_template(
        chat,
        tokenize=False,
        add_generation_prompt=True,
    )
    return rendered

def set_url(url):
    """Point all subsequent generate_response() calls at *url*."""
    global model_url
    model_url = url

def generate_response(text, timeout=60):
    """POST *text* as a prompt to the configured model endpoint.

    Args:
        text: fully formatted prompt string to send.
        timeout: request timeout in seconds. Previously no timeout was set,
            so a stalled endpoint would hang the caller forever; the new
            keyword has a default, so existing callers are unaffected.

    Returns:
        The raw response body as text. NOTE(review): no HTTP status check is
        performed, matching the original behavior — an error page would be
        returned verbatim; confirm callers tolerate that.
    """
    global model_url
    response = requests.post(model_url, json={'prompt': text}, timeout=timeout)
    return response.text

def save_memory(history_path, history):
    """Persist *history* as pretty-printed UTF-8 JSON at *history_path*.

    Creates the parent directory if it does not exist. Fixes in this
    revision:
    - uses os.path.dirname instead of rsplit('/') — portable, and a bare
      filename no longer causes a directory named after the file itself
      to be created;
    - the file handle is closed deterministically via ``with`` (previously
      it was left open);
    - an explicit encoding so output does not depend on the locale.
    """
    parent = os.path.dirname(history_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(history_path, 'w', encoding='utf-8') as fw:
        json.dump(history, fw, ensure_ascii=False, indent=4)

def load_memory(history_path):
    """Load conversation history JSON from *history_path*.

    Returns the parsed history list, or an empty list when the file does
    not exist. Fixes in this revision: the file handle is closed via
    ``with`` (previously leaked), the pointless ``copy.deepcopy`` of an
    empty list is removed, and an explicit encoding matches save_memory.
    """
    if os.path.exists(history_path):
        with open(history_path, encoding='utf-8') as fr:
            return json.load(fr)
    return []

def coach_response(tokenizer, query, user_id):
    """Generate one coach reply to *query* for *user_id* and persist it.

    Returns (response, last_turn) where last_turn is a one-element list
    containing the (prompt, response) pair just appended to history.
    """
    global config
    # NOTE(review): passed through to make_context but unused there, so the
    # context window is not actually limited — confirm.
    max_window_size = 3
    # Load this user's stored conversation history.
    history_path=config.history_path.format(user_id)
    history = load_memory(history_path)
    # Build the full prompt from history plus the new query.
    raw_text, current_text = make_context(
        tokenizer,
        query,
        history=history,
        system=config.system,
        max_window_size=max_window_size
    )
    # Query the model endpoint for the answer.
    print(raw_text)
    response = generate_response(raw_text)
    # Append the new turn and persist the updated history.
    history.append((current_text, response))
    save_memory(history_path, history)
    return response, history[-1:]

def coach_score_response(tokenizer, user_id):
    """Score the stored conversation of *user_id* for customer satisfaction.

    Replays the saved history, reformats it into a transcript labelled
    美容师 (beautician) / 顾客 (customer), asks the judge model to rate it,
    and parses the verdict into a 0-100 score via parse_result().
    """
    global config
    his = json.load(open(config.history_path.format(user_id)))
    text = ''
    for turn in his:
        input_ = turn[0]
        output = turn[1]
        # Strip template scaffolding: keep the text after the first colon up
        # to the first newline (the user's utterance)...
        talk1 = input_.replace('：', ':').split(':', 1)[1].split('\n', 1)[0]
        # ...and the text after the last colon of the stored model output.
        # NOTE(review): assumes both stored strings always contain a colon
        # (full- or half-width) — would raise IndexError otherwise; confirm
        # against the prompt templates.
        talk2 = output.replace('：', ':').rsplit(':', 1)[1]
        text += '\n' + prompt_for_chat_model_score(tokenizer, talk1=talk1, talk2=talk2).strip().split('\n', 2)[-1]
    # Rewrite ChatML role markers into human-readable speaker labels.
    text = text.replace('<|im_start|>user\n', '美容师: ').replace('<|im_start|>assistant\n', '顾客: ').replace('<|im_end|>', '')
    input_ = config.template_score.format(text)
    text = prompt_for_chat_model_score(tokenizer, talk1=input_, talk2='')
    response = generate_response(text)
    score = parse_result(response)
    return score