from flask import Flask, request
import faiss  
import pymongo as pm
from PyCmpltrtok.common import md5, sep
from PyCmpltrtok.util_mongo import get_sorted_by_key, VALUE, KEY, get_history
from python_nlp.embed.cblue_text2mongo import USERNAME, EMBED_COL
from python_nlp.embed.cblue_text2mongo import EMBED_TBL as EMBED_TBL_TEXT
from python_nlp.kg.neo4j.load_data.data2mongo import EMBED_TBL as EMBED_TBL_KG
from python_nlp.kg.neo4j.load_data.data2mongo_words import EMBED_TBL as EMBED_TBL_KG_WORDS
from transformers import AutoModel, AutoTokenizer
from transformers import AutoConfig
import numpy as np
import sys
import torch
import time
import os
import logging
import argparse
from PyCmpltrtok.auth.mongo.conn import conn

logger = logging.getLogger(__name__)

# Module-level state, populated in the __main__ block before app.run():
# dev/model/tokenizer/max_len — embedding device, BERT model, tokenizer and
#   usable token limit, returned by load_embed_model().
dev, model, tokenizer, max_len = None, None, None, None
# md5-of-input -> embedding vector cache, filled lazily by do_infer().
global_cache_map = None
# FAISS index plus parallel lists of sentences and their md5 keys,
# aligned with the index row ids; built by build_search_index().
index, xsentences, xmd5s = None, None, None

app = Flask(__name__)


def load_embed_model(idx_gpu, model_name):
    """Load the BERT embedding model and tokenizer onto the given device.

    :param idx_gpu: CUDA device index (anything torch.device() accepts).
    :param model_name: HuggingFace hub name or local snapshot path.
    :return: (device, model, tokenizer, max_len) — max_len is the usable
        token limit for this model/tokenizer pair.
    """
    logger.info('Loading the BERT model')
    device = torch.device(idx_gpu)
    xtokenizer = AutoTokenizer.from_pretrained(model_name)
    xmodel = AutoModel.from_pretrained(model_name).to(device)
    # The usable sequence length is bounded by both the tokenizer's own
    # limit and the model's positional-embedding table size.
    cfg = AutoConfig.from_pretrained(model_name)
    xmax_len = min(xtokenizer.model_max_length, cfg.max_position_embeddings)
    logger.info('Loaded.')
    return device, xmodel, xtokenizer, xmax_len


def build_search_index(mongo_link, xembed_table, d=768):
    """Build a FAISS L2 index from embeddings stored in MongoDB.

    :param mongo_link: Config name of the MongoDB connection.
    :param xembed_table: Collection name holding the embedding rows.
    :param d: Embedding dimensionality (default 768, i.e. BERT-base;
        previously hard-coded — kept as default for backward compatibility).
    :return: (index, xsentences, xmd5s) — the FAISS index plus parallel
        lists of the original sentences and their md5 keys, aligned with
        the index row ids.

    Exits the process (sys.exit(1)) if the collection yields no embeddings.
    """
    mongo = conn(mongo_link)
    mdb = mongo['CBLUE']
    logger.info('%r', mdb)
    get_history(mdb, 'u_try_it', limit=1)  # probe that MongoDB is reachable

    # Index families: https://github.com/facebookresearch/faiss/wiki/Faiss-indexes
    # index = faiss.IndexFlatIP(d)   # cosine similarity (needs L2-normalized vectors)
    index = faiss.IndexFlatL2(d)   # Euclidean distance

    logger.info('Index OK')
    logger.info('index.is_trained: %d', index.is_trained)

    # Load vectors from MongoDB into FAISS.
    logger.info('Loading from mongo %s', xembed_table)
    xrows = get_sorted_by_key(mdb, xembed_table, USERNAME)
    xlist = []
    xsentences = []
    xmd5s = []
    for i, xrow in enumerate(xrows):
        xembed = xrow.get(EMBED_COL, None)
        if not xembed:
            # Row has no embedding yet; mark it and skip.
            print('!', end='')
            continue
        xlist.append(xembed)
        xsentences.append(xrow[VALUE])
        xmd5s.append(xrow[KEY])
        print('.', end='')
        if i % 1000 == 0:
            print(i)
    print()
    if not xlist:
        print('No data found!')
        sys.exit(1)
    xlist_np = np.array(xlist, dtype=np.float32)
    # faiss.normalize_L2(xlist_np)  # needed only for cosine similarity
    index.add(xlist_np)                  # add vectors to the index
    print(f'Loaded {len(xlist)} rows of data.')
    return index, xsentences, xmd5s


@app.route("/api", methods=['POST'])
def do_infer():
    """HTTP POST /api: embed the input text and return its k nearest neighbors.

    Expects a JSON body {"input": <text>, "k": <int>}; responds with a dict
    containing FAISS row ids ('I'), distances ('D'), and the matching
    'sentences' and 'md5s' looked up from the module-level parallel lists.
    """
    # Parse the request payload.
    req_json = request.get_json(force=True)
    xinput = req_json['input'].strip()
    k = int(req_json['k'])
    xmd5 = md5(xinput)
    sep(xinput)
    print('md5:', xmd5)

    # Embed the input, with an md5-keyed in-memory cache.
    # NOTE(review): global_cache_map grows without bound across requests;
    # consider capping it (e.g. an LRU) if the service runs long.
    ts1 = time.time_ns()
    xkey_vector_list = global_cache_map.get(xmd5, None)
    if xkey_vector_list is not None:
        print('【使用嵌入缓存】')  # embedding cache hit
    else:
        ids = tokenizer(
            xinput,
            max_length=max_len,
            truncation=True,
            padding=False,
            # padding='max_length',  # https://stackoverflow.com/questions/70067608/how-padding-in-huggingface-tokenizer-works
            # https://stackoverflow.com/questions/61443480/huggingfaces-bert-tokenizer-not-adding-pad-token
            return_tensors='pt',
        )
        with torch.no_grad():
            # Loop names chosen to avoid shadowing the outer top-k count `k`.
            xout = model(**{name: tensor.to(dev) for name, tensor in ids.items()})
            embed = xout['pooler_output'].cpu()
        xkey_vector_list = embed.numpy()
        global_cache_map[xmd5] = xkey_vector_list
    ts2 = time.time_ns()
    print('（嵌入）向量化 in ms:', (ts2 - ts1) / 1e6)

    # Nearest-neighbor search over the FAISS index.
    ts1 = time.time_ns()
    # faiss.normalize_L2(xkey_vector_list)  # needed only for cosine similarity
    D, I = index.search(xkey_vector_list, k)
    # Single-query batch: keep only the first (and only) result row.
    D = D.tolist()[0]
    I = I.tolist()[0]
    ts2 = time.time_ns()
    print('搜索 in ms:', (ts2 - ts1) / 1e6)

    # Assemble the response dict (Flask serializes it to JSON).
    res_json = dict()
    res_json['I'] = I
    res_json['D'] = D
    res_json['sentences'] = [xsentences[i] for i in I]
    res_json['md5s'] = [xmd5s[i] for i in I]

    return res_json
    
    
if '__main__' == __name__:
    # Command-line entry point: parse options, load the model, build the
    # FAISS index from MongoDB, then serve the Flask app.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--host', help='Host name of this service.', type=str, default='0.0.0.0')
    parser.add_argument('--port', help='Port of this service.', type=int, default=7700)
    parser.add_argument('--mongo-link', help='Config name of the mongodb.', type=str, default='local', dest='mongo_link')
    parser.add_argument('--mongo-table', help='Table name of the mongodb.', type=str, default=EMBED_TBL_KG_WORDS, dest='mongo_table')
    # New options; their defaults reproduce the previously hard-coded values,
    # so existing invocations behave exactly as before.
    parser.add_argument('--gpu', help='CUDA device index for the embedding model.', type=int, default=1)
    parser.add_argument(
        '--model',
        help='Embedding model: a HuggingFace hub name (needs network access, '
             'e.g. "moka-ai/m3e-base" or "bert-base-chinese") or a local '
             'snapshot path (offline).',
        type=str,
        default='/home/yunpeng/models/hf/m3e-base',
        dest='model_name',
    )

    args = parser.parse_args()
    mongo_link = args.mongo_link
    mongo_table = args.mongo_table
    host = args.host
    port = args.port

    dev, model, tokenizer, max_len = load_embed_model(args.gpu, args.model_name)
    global_cache_map = dict()
    index, xsentences, xmd5s = build_search_index(mongo_link, mongo_table)

    app.run(host, port)
