import jieba
import pickle
import numpy as np

# Load the custom user dictionary so jieba recognizes domain-specific terms.
jieba.load_userdict("userdict.txt")

# Read the stop words into the `stopwords` list, one stripped line each.
# Fix: the original called open(...).readlines() and never closed the
# file handle; a `with` block guarantees it is released.
filepath = r'stopwords.txt'
with open(filepath, 'r', encoding="utf-8") as stopword_file:
    stopwords = [line.strip() for line in stopword_file]


# Read a corpus file and segment each line into words.
def readfile2wordlist(file_path):
    """Read *file_path* and return (raw_lines, segmented_lines).

    raw_lines:       list of stripped original lines (one per document)
    segmented_lines: parallel list of token lists, with stop words and
                     the single-space token removed

    The file is decoded as utf-8-sig so a leading BOM is dropped.
    """
    word_list = []
    cut_word_list = []
    # Hoist stop-word membership into a set once per call: O(1) lookups
    # instead of scanning the module-level stop-word list per token.
    stopword_set = set(stopwords)
    with open(file_path, 'r', encoding="utf-8-sig") as f:
        for line in f:  # iterate lazily instead of f.readlines()
            stripped = line.strip()
            seg_list = [w for w in jieba.cut(stripped)
                        if w not in stopword_set and w != ' ']
            word_list.append(stripped)
            cut_word_list.append(seg_list)
    return word_list, cut_word_list


def calculate_idf(cut_word_list):
    """Compute a smoothed IDF score for every word in the corpus.

    For each word, IDF = log((N + 1) / (df + 1)) where N is the number
    of documents and df is the number of documents containing the word.
    Returns a dict mapping str(word) -> IDF.
    """
    doc_total = len(cut_word_list)
    # Document frequency: in how many documents does each word appear?
    doc_freq = {}
    for doc in cut_word_list:
        for token in set(doc):  # set(): count each word once per document
            key = str(token)
            doc_freq[key] = doc_freq.get(key, 0) + 1
    # Add-one smoothing keeps the ratio finite and positive.
    return {word: np.log((doc_total + 1) / (df + 1))
            for word, df in doc_freq.items()}


# Build the bag-of-words vocabulary.
def make_token2id(cut_word_list):
    """Map each distinct word to an integer id, in first-seen order."""
    token2id = {}
    for doc in cut_word_list:
        for token in doc:
            # setdefault assigns the next free id only on first sighting;
            # len(token2id) grows exactly like a manual counter would.
            token2id.setdefault(token, len(token2id))
    return token2id


# Count word frequencies and return them as a dict.
def make_word_freq(word_list):
    """Return a dict mapping str(word) -> number of occurrences."""
    freword = {}
    for word in word_list:
        key = str(word)
        # .get with a default collapses the membership test and update.
        freword[key] = freword.get(key, 0) + 1
    return freword


def make_tfidf_library(cut_word_list, idf_dict):
    """Build one TF-IDF vector (as a plain list) per segmented document.

    TF is the within-document frequency divided by document length;
    IDF values come from idf_dict. Every vector has the dimension of
    the full corpus vocabulary produced by make_token2id.
    """
    vocab = make_token2id(cut_word_list)
    dim = len(vocab)
    library = []
    for doc in cut_word_list:
        vec = np.zeros(dim)
        doc_len = len(doc)
        freq = make_word_freq(doc)
        for token in doc:
            # Repeated tokens just rewrite the same slot with the same value.
            vec[vocab[token]] = (freq[token] / doc_len) * idf_dict[token]
        library.append(vec.tolist())
    return library


# --- Index-building pipeline: segment the corpus, compute IDF, build the
# TF-IDF matrix, and persist everything for the query side. ---
# NOTE(review): the globals created here (word_list, token2id, idf_dict,
# tfidf_library) are read by the query code further down this file, so
# the names and the execution order must stay exactly as they are.
word_list, cut_word_list = readfile2wordlist('commodity_title.txt')
idf_dict = calculate_idf(cut_word_list)
token2id = make_token2id(cut_word_list)
tfidf_library = make_tfidf_library(cut_word_list, idf_dict)

# Persist the index as a pickle so the query script can reload it later.
with open('tfidf_data.pk', 'wb') as f:
    pickle.dump((word_list, token2id, idf_dict, tfidf_library), f)

import jieba
import pickle
import numpy as np

# Load the custom user dictionary so jieba recognizes domain-specific terms.
jieba.load_userdict("userdict.txt")

# Read the stop words into the `stopwords` list, one stripped line each.
# Fix: the original called open(...).readlines() and never closed the
# file handle; a `with` block guarantees it is released.
filepath = r'stopwords.txt'
with open(filepath, 'r', encoding="utf-8") as stopword_file:
    stopwords = [line.strip() for line in stopword_file]


# Segment a single sentence into words.
def split_words(word):
    """Tokenize one string with jieba, dropping stop words and ' '."""
    tokens = jieba.lcut(word)
    return [t for t in tokens if t not in stopwords and t != ' ']


# Count word frequencies and return them as a dict.
def make_word_freq(word_list):
    """Return a dict mapping str(word) -> number of occurrences."""
    counts = {}
    for item in word_list:
        key = str(item)
        # .get with a default collapses the membership test and update.
        counts[key] = counts.get(key, 0) + 1
    return counts


# Compute a single query's TF-IDF vector against the stored vocabulary.
def make_tfidf(cut_words, token2id, idf_dict):
    """Return a vocabulary-sized TF-IDF vector for the token list.

    Tokens not present in token2id are skipped entirely; TF is
    normalized by the count of in-vocabulary tokens only, while the
    raw frequencies are taken over the full token list.
    """
    in_vocab = [t for t in cut_words if t in token2id]
    total = len(in_vocab)
    freq = make_word_freq(cut_words)
    vec = np.zeros(len(token2id))
    for token in in_vocab:
        vec[token2id[token]] = float(freq[token] / total) * float(idf_dict[token])
    return vec


# Cosine similarity of two vectors.
def Cos_Distance(vector1, vector2):
    """Return the cosine similarity between the two input vectors."""
    a = np.array(vector1)
    b = np.array(vector2)
    denominator = np.linalg.norm(a) * np.linalg.norm(b)
    return float(np.sum(a * b)) / denominator


# Similarity of one query vector against every library vector.
def similarity_words(vec, vecs_list):
    """Return a list of cosine similarities, one per vector in vecs_list."""
    return [Cos_Distance(vec, candidate) for candidate in vecs_list]


def main(words, token2id, idf_dict, tfidf_library):
    """Return the 10 library titles most similar to the query string.

    Each result item is [title, similarity], best match first.
    NOTE: reads the module-level `word_list` built by the indexing step.
    """
    query_vec = make_tfidf(split_words(words), token2id, idf_dict)
    scores = similarity_words(query_vec, tfidf_library)
    # Sort ascending then take the last ten in reverse; the slice is kept
    # as-is to preserve the original tie-ordering exactly.
    ranked = sorted(enumerate(scores), key=lambda pair: pair[1])
    top_ten = ranked[-1:-11:-1]
    return [[word_list[idx], score] for idx, score in top_ten]


if __name__ == '__main__':
    # Demo query: rank the stored commodity titles against this phone title.
    words = '小米8 全面屏游戏智能手机  6GB+128GB 黑色 全网通4G 双卡双待 拍照手机'
    outputs = main(words, token2id, idf_dict, tfidf_library)
    for title, score in outputs:
        print(title + '                 ' + str(score))
