from flask import Flask, request
import pymongo as pm
from PyCmpltrtok.util_mongo import get_sorted_by_key, VALUE, KEY, get_history
from python_nlp.embed.cblue_text2mongo import USERNAME, EMBED_COL
from python_nlp.embed.cblue_text2mongo import EMBED_TBL as EMBED_TBL_TEXT
from python_nlp.kg.neo4j.load_data.data2mongo import EMBED_TBL as EMBED_TBL_KG
from python_nlp.kg.neo4j.load_data.data2mongo_words import EMBED_TBL as EMBED_TBL_KG_WORDS
from transformers import AutoModel, AutoTokenizer
from transformers import AutoConfig
import numpy as np
import sys
import torch
import time

if '__main__' == __name__:

    # Load the text BERT model (m3e-base sentence-embedding model).
    print('Loading the BERT model')
    dev = torch.device(0)  # assumes CUDA device 0 is available — TODO confirm on deploy host
    if 0:
        # model_name = 'bert-base-chinese'  # by model name: needs internet access (and possibly a proxy)
        model_name = 'moka-ai/m3e-base'  # by model name: needs internet access (and possibly a proxy)
    else:
        # Earlier local snapshot paths kept for reference (no internet needed):
        # model_name = r'C:\Users\peter\.cache\huggingface\hub\models--bert-base-chinese\snapshots\8d2a91f91cc38c96bb8b4556ba70c392f8d5ee55'
        # model_name = '/home/peiyp2004/.cache/huggingface/hub/models--bert-base-chinese/snapshots/8d2a91f91cc38c96bb8b4556ba70c392f8d5ee55'
        # model_name = '/home/peiyp2004/my_github/m3e-base'
        model_name = r'D:\_const\wsl\my_github\m3e-base'
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModel.from_pretrained(model_name).to(dev)
    # Determine the maximum token length: the smaller of the tokenizer's limit
    # and the model's positional-embedding capacity.
    xlen01 = tokenizer.model_max_length
    config = AutoConfig.from_pretrained(model_name)
    xlen02 = config.max_position_embeddings
    max_len = min(xlen01, xlen02)
    print('Loaded.')

    # Flask application instance.
    app = Flask(__name__)

    def build_search_index(xembed_table):
        """Load pre-computed sentence embeddings from MongoDB into memory.

        Parameters
        ----------
        xembed_table : str
            Name of the MongoDB collection holding the embedding rows.

        Returns
        -------
        tuple
            ``(xlist_np, xsentences, xmd5s)`` — a float32 ``(n, dim)`` NumPy
            matrix of embeddings, the matching sentences, and their md5 keys,
            all aligned by index.

        Exits the process with status 1 when the collection yields no usable
        rows, since the server would have nothing to search against.
        """
        # Connect to MongoDB. (Dead if/else toggle removed; the alternate host
        # is kept as a comment.)
        IP = '127.0.0.1'
        # IP = '172.20.240.1'  # IP of WSL's Windows host — changes on every WSL restart!
        mongo = pm.MongoClient(IP, 27017, serverSelectionTimeoutMS=3000)
        mdb = mongo['CBLUE']
        print(mdb)
        # Probe the connection so an unreachable server fails fast at startup.
        get_history(mdb, 'u_try_it', limit=1)

        # Load the embedding rows from MongoDB.
        print('Loading from mongo ...', xembed_table)
        xrows = get_sorted_by_key(mdb, xembed_table, USERNAME)
        xlist = []
        xsentences = []
        xmd5s = []
        for i, xrow in enumerate(xrows):
            xembed = xrow.get(EMBED_COL, None)
            if not xembed:
                # Row has a missing or empty embedding; skip it.
                print('!', end='')
                continue
            xlist.append(xembed)
            xsentences.append(xrow[VALUE])
            xmd5s.append(xrow[KEY])
            print('.', end='')
            if i % 50 == 0:
                print(i)  # periodic progress marker every 50 rows
        print()
        if not xlist:
            print('No data found!')
            sys.exit(1)  # nothing to index — abort startup
        xlist_np = np.array(xlist, dtype=np.float32)
        print(f'Loaded {len(xlist)} rows of data.')
        return xlist_np, xsentences, xmd5s

    @app.route("/api", methods=['POST'])
    def do_infer():
        """装饰器@app.route定义了在/api上的HTTP POST接口。"""
        # 获取输入字典
        req_json = request.get_json(force=True)
        # 获取输入
        xinput = req_json['input']
        k = int(req_json['k'])
        ids = tokenizer(
            xinput,
            max_length=max_len,
            truncation=True,
            padding=False,
            # padding='max_length',  # https://stackoverflow.com/questions/70067608/how-padding-in-huggingface-tokenizer-works
            # https://stackoverflow.com/questions/61443480/huggingfaces-bert-tokenizer-not-adding-pad-token
            return_tensors='pt',
        )
        with torch.no_grad():
            xout = model(**{k: v.to(dev) for k, v in ids.items()})
            embed = xout['pooler_output'].cpu()
        xkey_vector_list = embed.numpy()

        # 手写搜索底层
        ts1 = time.time_ns()
        print('xlist_np:', xlist_np.shape)
        print('xkey_vector_list:', xkey_vector_list.shape)
        diff = xlist_np - xkey_vector_list
        print('diff:', diff.shape)
        diff2 = diff ** 2
        print('diff2:', diff2.shape)
        diff2_sum = diff2.sum(axis=-1)
        print('diff2_sum:', diff2_sum.shape)
        # diff2_sum_rt = diff2_sum ** 0.5
        # print('diff2_sum_rt:', diff2_sum_rt.shape)
        D = diff2_sum
        print('D:', D.shape)

        # 排序索引
        I = np.argsort(D)
        print('I:', I.shape)

        # 取前k个索引
        I = I[:k]
        print('I:', I.shape)

        # 用索引排序并取得前k个距离
        D = D[I]
        print('D:', D.shape)

        ts2 = time.time_ns()
        print('Duration in ms:', (ts2 - ts1) / 1e6)

        D = D.tolist()
        I = I.tolist()

        # 组织返回字典
        res_json = dict()
        res_json['I'] = I
        res_json['D'] = D
        res_json['sentences'] = [xsentences[i] for i in I]
        res_json['md5s'] = [xmd5s[i] for i in I]

        # 返回
        return res_json

    # Build the in-memory search index from the KG-words embedding table,
    # then serve the search API on all interfaces, port 7759.
    xlist_np, xsentences, xmd5s = build_search_index(EMBED_TBL_KG_WORDS)
    app.run('0.0.0.0', 7759)
