# -*- coding: utf-8 -*-
# @Author  : 曾诗诗
# @Time    : 2022/5/9 15:26
# @Function:引用清华分词工具包 -- THULAC

import thulac
from sklearn.feature_extraction.text import TfidfVectorizer

# Load the stop-word list and keep it as a list of strings.
# BUG FIX: the file was previously opened in binary mode ('rb'), producing a
# list of *bytes* entries; TfidfVectorizer tokenizes into str, so none of the
# stop words could ever match. Decode as UTF-8 so they actually take effect.
with open(r"stop_words.txt", encoding="utf-8") as stpwrd_dic:
    stpwrd_content = stpwrd_dic.read()
# One stop word per line.
stpwrdlst = stpwrd_content.splitlines()

thu1 = thulac.thulac(seg_only=True)  # segmentation-only mode (no POS tagging)
texts = []  # cache of segmented book descriptions, filled lazily by cuttext()

def cuttext(rows):
    """Segment the description (column 4) of every book row with THULAC and
    cache the segmented strings in the module-level ``texts`` list."""
    texts.extend(thu1.cut(row[4], text=True) for row in rows)

def splitebook(rows, searchinfo):
    """Score every book description against a search query by cosine similarity.

    rows       -- book records; column 4 holds the description text.
    searchinfo -- free-text search query.

    Returns a dict mapping row index -> similarity score as a percentage
    rounded to 2 decimals; only books sharing at least one non-zero TF-IDF
    term with the query are included.
    """
    # Lazily segment all book descriptions on the first call.
    if texts == []:
        cuttext(rows)
    # Segment the query text.
    text = thu1.cut(searchinfo, text=True)
    # The query occupies the last slot of `texts`: replace it if a previous
    # call already appended one, otherwise append it.
    if len(texts) > len(rows):
        texts[len(texts) - 1] = text
    else:
        texts.append(text)
    # Vectorize all documents (books + query) with TF-IDF.
    vector = TfidfVectorizer(stop_words=stpwrdlst)
    tfidf = vector.fit_transform(texts)
    # weightlist[i][j] is the tf-idf weight of term j in document i.
    weightlist = tfidf.toarray()
    lens = len(weightlist)
    # Number of vocabulary terms; taken from the matrix shape instead of the
    # deprecated/removed vector.get_feature_names() (gone in sklearn >= 1.2).
    n_terms = weightlist.shape[1]
    # Candidate books: those sharing at least one non-zero term with the
    # query, which is the last row of the matrix.
    query = weightlist[lens - 1]
    simbookindex = []
    for j in range(n_terms):
        if query[j] != 0:
            for i in range(lens - 1):
                if weightlist[i][j] != 0 and i not in simbookindex:
                    simbookindex.append(i)
    scores = {}
    for index in simbookindex:
        dot_product = 0.0
        normA = 0.0
        normB = 0.0
        for a, b in zip(weightlist[index], query):
            dot_product += a * b
            normA += a ** 2
            normB += b ** 2
        # BUG FIX: the original did `return 0` here, aborting the whole
        # function with the wrong return type (the caller iterates
        # scores.items()). A zero-norm vector simply cannot be scored, so
        # skip this candidate instead.
        if normA == 0.0 or normB == 0.0:
            continue
        score = round(dot_product / ((normA ** 0.5) * (normB ** 0.5)) * 100, 2)
        if score != 0:
            scores[index] = score
    return scores

def natural_search(rows, searchinfo, searchresult):
    """Append the rows most similar to ``searchinfo`` onto ``searchresult``,
    ordered by descending similarity score, skipping rows already present.

    Returns the (mutated) ``searchresult`` list.
    """
    scores = splitebook(rows, searchinfo)
    # Stable sort of row indices by score, highest first — same ordering as
    # sorting the (index, score) items by value.
    for idx in sorted(scores, key=scores.get, reverse=True):
        candidate = rows[idx]
        if candidate not in searchresult:
            searchresult.append(candidate)
    return searchresult

