from flask import Flask
from flask import request
from milvus_support import MilvusBase as milvus_support
from sentence_transformers import SentenceTransformer
import json, requests
import os
from datetime import datetime

# Flask application exposing the POST /support endpoint.
app = Flask(__name__)


@app.route('/support', methods=["POST"])
def index():
    """Answer one customer-support question.

    Expects a JSON body with 'question' (str) and 'history' (currently
    unused by the pipeline).  Pipeline: embed the question, retrieve
    QA pairs and text chunks from Milvus, then either return the top QA
    answer directly (high confidence), refuse (everything low), or build
    a prompt and ask the LLM.

    Returns:
        dict: {"code": 200, "message": "success", "result": <answer text>},
        serialized to JSON by Flask.
    """
    # NOTE(review): `id` shadows the builtin and a shared global counter is
    # not thread-safe under a multi-threaded WSGI server — TODO confirm the
    # deployment is single-threaded or move to a lock/atomic counter.
    global id
    json_data = json.loads(request.get_data())
    question = json_data['question']
    history = json_data['history']  # parsed but not used downstream yet

    # encoding='utf-8' so Chinese log text is written correctly regardless
    # of the platform's default locale encoding.
    with open(log_dir, 'a', encoding='utf-8') as f:
        f.write(
            "-----------------------------Support 智能客服助手日志, 问题{}----------------------------\n".format(id))
        f.write("问题是：{}\n".format(question))

    ''' Retriever '''
    # Number of candidates requested from each Milvus collection.
    top_k = 3
    # Embed the user question; normalized so inner product == cosine similarity.
    embeddings_1 = model.encode(question, normalize_embeddings=True)
    # Recall QA pairs and raw text chunks.
    query_list, answer_list, score_list, text_list, dis_list = qa_text_retriever(embeddings_1, top_k)

    ''' log retrieval results '''
    with open(log_dir, 'a', encoding='utf-8') as f:
        f.write("--------------------------------------召回信息-----------------------------------------\n")
        f.write("QA召回的问题:\n{}\n".format(query_list))
        f.write("QA召回的答案:{}\n".format(answer_list))
        f.write("QA召回的分数:\n{}\n".format(score_list))
        f.write("文本召回的文本块:\n{}\n".format(text_list))
        f.write("文本召回的分数:{}\n".format(dis_list))

    ''' prompt assembly + response '''
    # Score statistics; default=0.0 guards against empty retrieval results,
    # which would otherwise make max() raise ValueError.
    qa_max_score = float(max(score_list, default=0.0))
    qa_text_max_score = float(max(score_list + dis_list, default=0.0))
    max_score = 0.9  # above this: trust the best QA answer verbatim
    min_score = 0.5  # below this everywhere: refuse to answer

    if qa_max_score > max_score:
        result = answer_list[0]
    elif qa_text_max_score < min_score:
        result = "抱歉，作为超聚变数字技术有限公司产品的智能客服，我必须保证我的回答专业、严谨。\n您的问题超出了我的认知，不过您可以咨询一下我的其它智能客服同事。"
    else:
        result = prompt_response(min_score, query_list, answer_list,
                                 score_list, text_list, dis_list, top_k, question)

    with open(log_dir, 'a', encoding='utf-8') as f:
        f.write("--------------------------------------响应结果-----------------------------------------\n")
        f.write(result)
        f.write("\n\n\n")
    id += 1

    return {"code": 200, "message": "success", "result": result}


def chat_qwen_72B(prompt):
    """Send *prompt* to the Qwen-72B-Chat OpenAI-compatible endpoint.

    Args:
        prompt: the fully assembled user prompt (background knowledge +
            instructions + question).

    Returns:
        str: the raw response body decoded as UTF-8 (an OpenAI-style JSON
        string; the caller extracts choices[0].message.content).

    Raises:
        requests.RequestException: on connection failure or timeout.
    """
    # SECURITY(review): the API key is hard-coded in source — move it to an
    # environment variable or secret store and rotate the exposed key.
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer sk-ChtJNYJD1sm5FqwA7bE8EfFa3eE847Fa9758E5626d64Cc9a"
    }
    data = {
        "model": "Qwen-72B-Chat",
        "messages": [{"role": "user", "content": prompt}]
    }

    # timeout prevents a dead LLM host from hanging the request handler forever.
    response = requests.post("http://70.182.56.14:11000/v1/chat/completions",
                             data=json.dumps(data), headers=headers, timeout=120)
    return response.content.decode('utf-8')

def init_log():
    """Create a timestamped log file in the current directory.

    Sets module globals:
        log_dir: absolute path of the newly created log file.
        id: per-question counter, reset to 0.
    """
    global log_dir
    global id

    id = 0
    # ':' is invalid in file names on Windows — use '-' separators throughout
    # (original format "%H:%M:%S" would fail there).
    formatted_datetime = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    log_name = "support." + formatted_datetime + ".log"
    log_dir = os.path.join(os.path.abspath('.'), log_name)

    # encoding='utf-8' so the Chinese header is written correctly on any locale.
    with open(log_dir, 'a', encoding='utf-8') as f:
        f.write("-----------Support 智能客服助手日志-----------\n")


def init_model():
    """Load the sentence-embedding model and open the Milvus connection.

    Sets module globals `model` (SentenceTransformer) and `milvus_cli`
    (Milvus client wrapper) used by the request handler.
    """
    global model, milvus_cli
    model = SentenceTransformer('/model/data/transformers/BAAI/bge-large-zh-v1.5')
    milvus_cli = milvus_support(
        milvus_host="70.182.56.3",
        milvus_port=19530,
        VECTOR_DIM=1024,
        METRIC_TYPE="COSINE",
    )


def qa_text_retriever(embeddings, top_k):
    """Run QA-pair and raw-text retrieval for one embedded question.

    Args:
        embeddings: the question's embedding vector.
        top_k: number of candidates to fetch from each collection.

    Returns:
        Tuple of (matched questions, stored answers, QA scores,
        text chunks, text scores).
    """
    vectors = [embeddings]

    # QA collection: questions semantically close to the user's, with the
    # answers stored alongside them and their similarity scores.
    questions, answers, qa_scores = milvus_cli.search_question('support_qa', vectors, top_k)

    # Text collection: raw document chunks and their similarity scores.
    chunks, text_scores = milvus_cli.search_text('support_text', vectors, top_k)

    return questions, answers, qa_scores, chunks, text_scores


def prompt_response(min_score, query_list, answer_list,
                    score_list, text_list, dis_list, top_k, question):
    """Assemble the background-knowledge prompt and query the LLM.

    Only retrieval hits scoring >= min_score are included.  QA hits are
    assumed sorted by score descending (Milvus ranking), so QA inclusion
    stops at the first hit below the threshold; text hits are filtered
    individually.

    Args:
        min_score: inclusion threshold for retrieved items.
        query_list / answer_list / score_list: QA retrieval results.
        text_list / dis_list: text-chunk retrieval results.
        top_k: maximum number of items considered from each list.
        question: the user's original question.

    Returns:
        str: the LLM's answer text.

    Raises:
        KeyError / json.JSONDecodeError: if the LLM response is malformed.
    """
    parts = ["你的背景知识: \n"]

    # QA pairs: bounded zip instead of range(top_k) indexing, so fewer than
    # top_k retrieval results no longer raises IndexError.
    for query, answer, score in zip(query_list[:top_k], answer_list[:top_k],
                                    score_list[:top_k]):
        if score < min_score:
            break
        parts.append(query)
        parts.append("\n")
        parts.append(answer)
        parts.append("\n")

    # Text chunks: every above-threshold chunk is kept (no early stop).
    for text, dis in zip(text_list[:top_k], dis_list[:top_k]):
        if dis >= min_score:
            parts.append(text)
            parts.append("\n")

    parts.append("对话要求：\n"
                 "1. 背景知识是最新的超聚变数字技术有限公司的产品信息，使用背景知识回答问题。\n"
                 "2. 优先使用背景知识的内容回答我的问题，答案应与背景知识严格一致。\n"
                 "3. 背景知识无法回答我的问题时，可以忽略背景知识，根据你的知识来自由回答。\n"
                 "4. 作为超聚变数字技术有限公司的产品客服，使用对话的风格，自然的回答问题。\n"
                 "5. 尽可能向客户提供背景知识中的给出的网址、链接。\n"
                 "我的问题是:\n")
    parts.append(question)
    # "".join avoids the quadratic cost of repeated string concatenation.
    message = "".join(parts)

    # NOTE(review): debug print of the full prompt on every request;
    # consider writing it to the log file instead.
    print(message)

    # Ask the LLM and extract the answer from the OpenAI-style response.
    response = chat_qwen_72B(message)
    data = json.loads(response)
    result = data['choices'][0]['message']['content']

    return result


if __name__ == '__main__':
    # Load the embedding model and Milvus client first, then create the
    # log file, so the service is fully initialized before accepting traffic.
    init_model()
    init_log()
    # NOTE(review): Flask's built-in server is for development; use a
    # production WSGI server (gunicorn/uwsgi) for deployment.
    app.run(host='70.182.42.131',
            port=8282)
