import argparse
import json
from json.decoder import JSONDecodeError
import time
import os
import flask
from flask import request
from FlagEmbedding import BGEM3FlagModel
from transformers import AutoTokenizer, AutoModel, AutoModelForCausalLM
import requests
import logging
app = flask.Flask(__name__)
embedding = None
tokenizer_map = {
    # "model_name" :  tokenizer
}
from args import *
def construct_template(model_name, instruct, content):
    """Build the chat-message list in the prompt format of the given model family.

    The family is detected by substring match on ``model_name``; the FIRST
    matching entry wins (order mirrors the original if/elif chain). A model
    name that matches nothing falls back to a single plain user turn.
    """
    prompt = f"{instruct}\n{content}"
    system_prompt = "你是一个有用的助手。"
    # Ordered (keyword, message list) pairs — order matters, since a model
    # name could in principle contain more than one keyword.
    family_templates = [
        ("chatglm3", [{"role": "<|system|>", "content": system_prompt},
                      {"role": "<|user|>", "content": prompt}]),
        ("qwen1.5", [{"role": "system", "content": system_prompt},
                     {"role": "user", "content": prompt}]),
        ("baichuan2", [{"role": "<reserved_106>", "content": prompt}]),
        ("internlm2", [{"role": "system", "content": system_prompt},
                       {"role": "user", "content": prompt}]),
        ("deepseek", [{"role": "User", "content": prompt}]),
        ("yi", [{"role": "user", "content": prompt}]),
        ("mistral", [{"role": "[INST]", "content": system_prompt},
                     {"role": "[INST]", "content": prompt}]),
        ("llama2", [{"role": "[INST] <<SYS>>", "content": system_prompt},
                    {"role": "[INST]", "content": prompt}]),
        ("llama3", [{"role": "system", "content": system_prompt},
                    {"role": "user", "content": prompt}]),
        ("gemma", [{"role": "user", "content": prompt}]),
    ]
    for keyword, messages in family_templates:
        if keyword in model_name:
            return messages
    # Unknown family: generic single-turn user message.
    return [{"role": "user", "content": prompt}]

def do_request(model_name, messages, max_new_tokens=512, timeout=600):
    """Render ``messages`` through the model's chat template and forward the
    prompt to the local generation backend.

    Args:
        model_name: directory name of the model under ``MODEL_PATH``; also the
            key sent to the backend and used for tokenizer caching.
        messages: chat messages as built by ``construct_template``.
        max_new_tokens: generation budget forwarded to the backend.
        timeout: seconds before the HTTP call is aborted. Without a timeout
            the request could hang forever and block the Flask worker.

    Returns:
        The backend's ``data`` field with qwen-style ``<|im_end|>`` markers
        stripped; empty string when the backend returns no ``data``.
    """
    tokenizer = tokenizer_map.get(model_name)
    if tokenizer is None:
        # Lazily load and cache the tokenizer — loading is expensive.
        tokenizer = AutoTokenizer.from_pretrained(
            f"{MODEL_PATH}/{model_name}", trust_remote_code=True, use_fast=False)
        tokenizer_map[model_name] = tokenizer
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True)
    resp = requests.post(
        "http://127.0.0.1:8898/control/call",
        json={"model": model_name, "text": text, "max_new_tokens": max_new_tokens},
        timeout=timeout,  # fail fast instead of hanging on a stuck backend
    )
    data = json.loads(resp.text)
    # Strip the end-of-turn token some backends leave in the decoded output.
    return data.get("data", "").replace("<|im_end|>", "")

# NOTE: news_summary / event_summary below share the same base model;
# PEFT trains a different adapter module for each task (8k context).
# qwen1.5 + LoRA module 1
def news_summary(model_name, news, ):
    """Summarize one news article in at most 100 characters using the model."""
    # Fine-tuning template (150 / 1000 sample runs) — keep instruction wording stable.
    instruction = "请用不超过100字概括下面新闻："
    return do_request(model_name, construct_template(model_name, instruction, news))


# LoRA module 2
def event_summary(model_name, news, ):
    """Merge several article summaries into one coherent <=100-char summary."""
    # Fine-tuning template (150 / 1000 sample runs) — keep instruction wording stable.
    instruction = "请按照一定逻辑汇总下面新闻的主要内容，不超过100字："
    return do_request(model_name, construct_template(model_name, instruction, news))


def update_record(post_data, result):
    """Append one {request, summary} pair to today's JSON record file.

    Records live under ``../record``, one file per day. The whole file is
    re-read and re-written on every call — fine for low traffic, but not
    safe under concurrent requests (NOTE(review): add locking if needed).

    Args:
        post_data: the original request payload (JSON-serializable).
        result: the generated summary string.
    """
    record_dir = "../record"
    os.makedirs(record_dir, exist_ok=True)  # tolerate a missing record dir
    fname = os.path.join(
        record_dir, time.strftime("data_%Y_%m_%d.json", time.localtime()))
    data = []
    if os.path.exists(fname):
        try:
            # Read-only access ("r", not "r+"); the file is written below
            # with ensure_ascii=False, so pin the encoding explicitly.
            with open(fname, "r", encoding="utf-8") as f:
                data = json.load(f)
        except JSONDecodeError:
            # A corrupt/partial file must not lose the current record;
            # start a fresh list instead of crashing the request.
            data = []
    data.append({"post_data": post_data, "summary": result})
    with open(fname, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=4)


def filter(newslist):
    """Drop short articles, then drop near-duplicates by embedding similarity.

    NOTE(review): shadows the builtin ``filter``; kept as-is because the
    route handler calls it by this name.

    Args:
        newslist: list of article strings ("title\\ncontent").

    Returns:
        The surviving articles, in original order; ``[]`` when nothing
        passes the length filter.
    """
    newslist = [news for news in newslist if len(news) >= 128]
    if not newslist:
        return []
    # One dense vector per article (e.g. 100 news -> 100 x dim matrix);
    # similarity is a plain dot product — presumably the bge-m3 dense
    # vectors are normalized so this approximates cosine (TODO confirm).
    vects = embedding.encode(newslist, batch_size=12, max_length=8192, )['dense_vecs']
    kept, kept_idx = [], []
    for i, v in enumerate(vects):
        # Keep the article only if no already-kept article is too similar.
        # any() short-circuits on the first hit, unlike the original
        # sum() over a fully materialized list.
        if not any(vects[k] @ v >= 0.82 for k in kept_idx):
            kept.append(newslist[i])
            kept_idx.append(i)
    return kept


@app.route('/<string:model_name>/chat', methods=['POST'])
def show_post(model_name):
    """Summarize the posted news list with the given model and record the result.

    Expects JSON ``{"newsList": [{"title": ..., "content": ...}, ...]}``.
    Any failure is reported as ``{"code": -1, ...}`` rather than a 500.
    """
    try:
        payload = request.json
        articles = filter(
            ["%s\n%s" % (item["title"], item["content"])
             for item in payload.get("newsList", [])])

        # A single surviving article needs no LLM pass at all.
        if len(articles) == 1:
            return {"code": 0, "message": "操作成功！", "data": "\n".join(articles)}

        if len(articles) > 20:
            # Down-sample to ~10 evenly spaced articles so coverage stays
            # broad and the distribution stays roughly uniform.
            # TODO: smarter selection (active learning).
            step = len(articles) // 10
            articles = [articles[i] for i in range(0, len(articles), step)]

        summaries = [news_summary(model_name, a, ) for a in articles]

        # Nothing survived filtering: fall back to the concatenated titles.
        if not summaries:
            summaries = [" ".join([item["title"] for item in payload.get("newsList", [])])]

        result = event_summary(model_name, "\n".join(summaries),)
        update_record(payload, result)
        return json.dumps({"code": 0, "message": "操作成功！", "data": result},
                          indent=4, ensure_ascii=False)
    except Exception as e:
        return {"code": -1, "message": f"{e}", "data": ""}

# Example record URL: http://10.26.32.91/record/data_2022_03_12.json


@app.route('/record/<string:file_name>', methods=['GET'])
def show_file(file_name):
    """Serve a daily record file written by ``update_record`` as JSON.

    Missing or unreadable files are reported as ``{"code": -2, ...}``.
    """
    try:
        # basename() hardens against path traversal. Flask's <string:>
        # converter already rejects "/", so this is behavior-identical for
        # all reachable URLs — it only protects direct callers.
        safe_name = os.path.basename(file_name)
        # Records are written with ensure_ascii=False, so pin UTF-8 instead
        # of relying on the platform default encoding.
        with open(os.path.join("../record", safe_name), encoding="utf-8") as f:
            payload = json.load(f)
        return {"code": 0, "message": "操作成功！", "data": payload}
    except Exception as e:
        return {"code": -2, "message": f"{e}", "data": ""}


if __name__ == '__main__':
    # CLI: only the listening port is configurable.
    cli = argparse.ArgumentParser()
    cli.add_argument("--port", type=int, default=8899)
    opts = cli.parse_args()

    # Load the shared embedding model once at startup; this assigns the
    # module-level global that filter() reads.
    embedding = BGEM3FlagModel(f'{MODEL_PATH}/bge-m3', use_fp16=True, device="cuda:0")

    app.run(host="0.0.0.0", port=opts.port)
