import os
from pprint import pprint
from gensim import similarities
from gensim import models
import jieba
import re
from apps.neo4j.gensim_train.corpus import corpus_raw as documents
from apps.neo4j.InterfaceSet import Neo4jHandler as neo4jHandler
from apps.utils import cosin
from sentence_transformers import SentenceTransformer
import scipy.spatial
from apps.utils import InferSentenceTransformer


def test():
    """Debug helper: run a hard-coded TF-IDF similarity query and print every
    (document index, score) pair, best match first."""
    model = models.TfidfModel.load(r"apps/neo4j/gensim_train/train_result/my_model.tfi")
    sim_index = similarities.MatrixSimilarity.load(r"apps/neo4j/gensim_train/train_result/my_index.index")

    vocab = model.id2word

    # Similarity query: tokenised sample phrase -> bag-of-words -> scores
    # against every document in the trained corpus index.
    query_tokens = "减速机 振动 声音 大".split()
    query_bow = vocab.doc2bow(query_tokens)
    scores = sim_index[model[query_bow]]

    ranked = sorted(enumerate(scores), key=lambda pair: pair[1], reverse=True)
    for doc_id, score in ranked:
        print(doc_id, score)


# Return similarity comparison results.
def get_similarity(query_document):
    """Rank knowledge-base texts by similarity to *query_document*.

    Three strategies contribute candidates, in order: gensim TF-IDF over the
    trained corpus, cosine sentence similarity against phenomena stored in
    Neo4j, and sentence-BERT embeddings over the corpus.  Each strategy adds
    at most ``max_record`` new ``(text, score)`` pairs whose score is at least
    ``delta``; texts already picked by an earlier strategy are skipped.

    Args:
        query_document: raw user query string (Chinese text, may contain
            several clauses separated by punctuation).

    Returns:
        tuple: ``(result, all_record)`` where ``result`` is a list of
        ``(text, score)`` pairs sorted by score descending and ``all_record``
        is the total number of pairs collected.
    """
    result = []
    inList = []     # texts already selected, for de-duplication across strategies
    max_record = 4  # max picks per strategy (tfidf / cosine / sentence-bert)
    all_record = 0  # total number of records returned
    delta = 0.3     # minimum similarity score to accept

    #############################################====tfidf====###################################################
    # Tokenise the query into words, e.g. "减速机 振动 声音 大".
    query_seq = list(jieba.cut(query_document, cut_all=False))

    # Drop punctuation tokens before building the bag-of-words.
    stoplist = set(', . ? ! ; : \' " ， 。 ？ ！ ~ ( ) “ ” 、'.split(' '))
    query_seq = [word for word in query_seq if word not in stoplist]

    query_list = " ".join(query_seq).split()

    tfidf = models.TfidfModel.load(r"apps/neo4j/gensim_train/train_result/my_model.tfi")
    index = similarities.MatrixSimilarity.load(r"apps/neo4j/gensim_train/train_result/my_index.index")

    dictionary = tfidf.id2word

    # Similarity of the query against every document in the trained index.
    query_bow = dictionary.doc2bow(query_list)
    sims = index[tfidf[query_bow]]

    putInNum = 0
    print("======================tfidf====================")
    print(sims)

    for document_number, score in sorted(enumerate(sims), key=lambda x: x[1], reverse=True):
        # Scores are sorted descending: once below the threshold, stop.
        if score < delta or putInNum >= max_record:
            break
        doc = documents[document_number]
        if doc not in inList:
            inList.append(doc)
            result.append((doc, score))
            putInNum += 1
            all_record += 1

    ########################################====cosin.sentence_resemble====##############################################
    handler = neo4jHandler.Neo4j_Handle()
    handler.connectDB()
    # Split the query into clauses on common Chinese/ASCII punctuation.
    pattern = r'\.|;|。|；|！|,|，'
    similarxianxiang = re.split(pattern, query_document)
    print("======================cosin.sentence_resemble====================")
    # Hoisted out of the loop: the set of phenomena in the DB does not change
    # between clauses, so fetch it once.
    phenomena = handler.matchAllPhenomenon()
    for xianxiang in similarxianxiang:
        similarlist = {}
        for xianxiangdb in phenomena:
            similarlist[xianxiangdb] = cosin.sentence_resemble(xianxiang, xianxiangdb)
        pprint(similarlist)

        putInNum = 0
        for xianxiangdb, score in sorted(similarlist.items(), key=lambda x: x[1], reverse=True):
            if score < delta or putInNum >= max_record:
                break
            if xianxiangdb not in inList:
                inList.append(xianxiangdb)
                result.append((xianxiangdb, score))
                putInNum += 1
                all_record += 1
                print(xianxiangdb)

    ########################################====sentence-bert====#############################################
    print("======================sentence-bert====================")

    # NOTE(review): Windows-specific path handling — anchors on the literal
    # "knowledge_back-master\\" project directory name and uses backslashes;
    # breaks on POSIX or if the project is renamed.  Confirm deployment target.
    curPath = os.path.abspath(os.path.dirname(__file__))
    rootPath = curPath[:curPath.find("knowledge_back-master\\") + len("knowledge_back-master\\")]
    dataPath = os.path.abspath(rootPath + 'apps\\models\\test_output')

    ifsbert = InferSentenceTransformer.InferSentenceTransformer(
        model_name_or_path=dataPath,
        device='cpu',
        onnx_model_name="test_onnxmodel02",
        enable_overwrite=False
    )

    corpus = documents
    corpus_embeddings = ifsbert.encode(corpus)
    # Embed each query clause; .numpy() converts the tensor batch for scipy.
    query_embeddings = ifsbert.encode(similarxianxiang).numpy()

    # For each clause, rank corpus sentences by cosine similarity.
    for query_embedding in query_embeddings:
        distances = scipy.spatial.distance.cdist([query_embedding], corpus_embeddings, "cosine")[0]
        # Ascending cosine distance == descending similarity.
        results = sorted(zip(range(len(distances)), distances), key=lambda x: x[1])
        putInNum = 0
        for idx, distance in results:
            score = 1 - distance
            print(corpus[idx].strip(), "(Score: %.4f)" % score)
            # BUGFIX: was `continue`, which kept scanning the whole corpus even
            # though distances are sorted ascending — nothing better follows.
            if score < delta or putInNum >= max_record:
                break
            text = corpus[idx].strip()
            if text not in inList:
                # BUGFIX: record the pick in inList (the original omitted this),
                # otherwise later clauses could add the same sentence again.
                inList.append(text)
                result.append((text, score))
                putInNum += 1
                all_record += 1

    # BUGFIX: sort by similarity score descending.  The original
    # `sorted(result, reverse=True)` compared the (text, score) tuples
    # lexicographically by text, returning a wrong ranking.
    result.sort(key=lambda item: item[1], reverse=True)
    return result, all_record

