from flask import Flask, request, jsonify
from flask import Flask
from flask import logging
import json
# Import the BERT-as-a-service client
from bert_serving.client import BertClient
import numpy as np
from datetime import datetime
from flask_cors import CORS
from termcolor import colored
import os

app = Flask(__name__)

# Keep non-ASCII characters (e.g. Chinese) readable in JSON responses
# instead of escaping them to \uXXXX sequences.
app.config['JSON_AS_ASCII'] = False
# Enable debug mode
app.config['DEBUG'] = True
# Flask application logger
log = logging.create_logger(app)


# BERT-as-a-service client.  The server address can be overridden with the
# BERT_SERVER_IP environment variable; the default preserves the original
# hard-coded address.
bc = BertClient(ip=os.environ.get('BERT_SERVER_IP', '10.39.65.192'))
# BERT embeddings of the candidate sentences (populated by train()).
doc_vecs = []
# Candidate sentences, index-aligned with doc_vecs (populated by train()).
condinates = []


# Semantic similarity training endpoint
@app.route('/semanticsTrain', methods=['POST'])
def semanticsTrain():
    """Train the similarity model from a posted question list.

    Expects a JSON body of the form ``{"questions": [...]}`` and a
    ``password`` request header.  Returns HTTP 400 when the question list
    is empty/missing or the password header is absent; otherwise encodes
    the questions with BERT (via train()) and reports success.
    """
    responseData = {"status": 200, "msg": "成功"}
    # Parameter extraction.  `or []` guards against a missing "questions"
    # key, which previously raised TypeError on len(None).
    getData = request.get_data()
    questions = json.loads(getData).get('questions') or []
    password = request.headers.get("password")
    # Parameter validation: reject an empty question list or missing password
    if not questions or password is None:
        responseData["msg"] = "请求参数不能为空"
        responseData["status"] = 400
        log.error("semanticsTrain 请求参数不能为空")
        return jsonify(responseData), 400
    train(questions)
    responseData["data"] = {"msg": "训练中..."}
    return jsonify(responseData), 200


# Semantic similarity recognition endpoint
@app.route('/semanticRecognition', methods=['POST', "GET"])
def semanticRecognition():
    """Find the candidate sentences most similar to the query text.

    Expects a ``text`` query-string parameter and a ``password`` request
    header.  Returns HTTP 400 when either is missing; otherwise returns
    the top-ranked candidates from getSemanticsSimilar().
    """
    responseData = {"status": 200, "msg": "成功"}
    # Parameter extraction
    text = request.args.get("text")
    password = request.headers.get("password")
    # Parameter validation
    if text is None or password is None:
        responseData["msg"] = "请求参数不能为空"
        responseData["status"] = 400
        log.error("semanticRecognition 请求参数不能为空")
        return jsonify(responseData), 400
    responseData["data"] = getSemanticsSimilar(text)
    return jsonify(responseData), 200


def train(data):
    """Encode the candidate sentences with BERT.

    Stores *data* in the module-level candidate list ``condinates`` and its
    BERT embeddings in ``doc_vecs``; getSemanticsSimilar() later ranks
    queries against these globals.

    :param data: list of candidate sentences (strings).
    """
    global condinates, doc_vecs
    condinates = data
    print('训练数据长度：', len(condinates))
    print('开始训练')
    start = datetime.now()
    # bc.encode() is a remote call to the BERT server and may take a while.
    doc_vecs = bc.encode(condinates)
    print('训练后的模型数据如下：', doc_vecs)
    end = datetime.now()
    print("训练结束花费时间：", (end - start).seconds, '秒')


def getSemanticsSimilar(text):
    """Return the top-5 candidates most similar to *text* by cosine score.

    Encodes *text* with BERT and ranks every vector in the global
    ``doc_vecs`` (built by train()) by cosine similarity.

    :param text: query sentence.
    :return: list of ``{"score": str, "sentence": str}`` dicts, best first.
    """
    # NOTE: the original declared `global doc_vesc` (a misspelling); no
    # global statement is needed here since doc_vecs is only read.
    query_vec = bc.encode([text])[0]
    topk = 5
    print('模型数据：', doc_vecs)
    # Vectorized cosine similarity between the query and every candidate
    # (replaces the former per-document Python loop flagged by a TODO).
    docs = np.asarray(doc_vecs)
    score = np.dot(docs, query_vec) / (
        np.linalg.norm(query_vec) * np.linalg.norm(docs, axis=1))
    topk_idx = np.argsort(score)[::-1][:topk]
    print("猜你可能的问题如下：")
    print("score:", score)
    print("top_idx", topk_idx)
    data = []
    for idx in topk_idx:
        print(score[idx], colored(condinates[idx], 'yellow'))
        data.append({"score": str(score[idx]), "sentence": condinates[idx]})
    return data

if __name__ == '__main__':
    print('启动中...')
    # app.run() blocks until the server shuts down, so the original
    # "startup succeeded" message only ever printed at exit; log it first.
    print('启动成功')
    app.run(host='10.39.65.192', port=5200)


