from collections import defaultdict
import math
import time
import numpy as np
import tqdm

def is_stop_word(word,
                 _stop_words=frozenset(['是/v', '要/v', '有/v', '人/n', '说/v', '上/f', '到/v']),
                 _skip_tags=("/w", "/u", "/d", "/p", "/c", "/r", "/m", "/f")):
    """Return True when *word* should be KEPT, False when it is a stop word.

    NOTE: despite its name, this is a keep-predicate (it is used with
    ``filter`` to retain content words). A word is discarded when it is a
    bare newline, appears in the explicit stop list, or carries one of the
    function-word POS tags (punctuation /w, auxiliary /u, adverb /d,
    preposition /p, conjunction /c, pronoun /r, numeral /m, locative /f).

    The stop list and tag tuple are bound once as keyword defaults so they
    are not rebuilt on every call; callers never need to pass them.
    """
    if word == "\n" or word in _stop_words:
        return False
    return not any(tag in word for tag in _skip_tags)

def cal_similarity(x, y, norm=False):
    """Compute the cosine similarity of two equal-length vectors.

    Args:
        x, y: equal-length sequences of numbers (lists or 1-D numpy arrays).
        norm: if True, map the cosine from [-1, 1] into [0, 1].

    Returns:
        float cosine similarity. Cosine is undefined for a zero vector, so
        by convention this returns 1.0 when both vectors are identical and
        0.0 otherwise in that case.
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    assert len(x) == len(y), "len(x) != len(y)"

    # Guard the division by zero: a zero vector has no direction.
    # (The previous `any(x == zero_list)` tested whether ANY element was
    # zero — for sparse one-hot vectors that shortcut fired almost always
    # and `any(x == y)` then reported 1.0 for nearly every pair.)
    if not x.any() or not y.any():
        return 1.0 if np.array_equal(x, y) else 0.0

    cos = float(np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y)))
    return 0.5 * cos + 0.5 if norm else cos  # optionally normalize to [0, 1]

# Load the GBK-encoded corpus. Each non-blank line looks like
# "19980101-01-001-001/m  word/tag word/tag ...": the third dash-separated
# field of the prefix identifies the article, so consecutive lines sharing
# that field are merged into one document string.
with open('199801_clear (1).txt', 'r', encoding='gbk') as fp:  # fix: close the file via context manager
    original_list = fp.readlines()

pre = "001"     # article id of the previous line
text_list = []  # one merged text string per article
now = ""        # text accumulated for the current article
print("处理前的行数:", len(original_list))
for ori in original_list:
    if ori == "\n":  # skip blank separator lines
        continue
    # maxsplit=1: split off the date/id prefix, keep the article text intact
    header, text = ori.split("/m  ", 1)
    num = header.split("-")[2]  # third field identifies the article
    if pre == num:
        now = now + " " + text
    else:
        # article boundary: flush the finished article, start the new one
        # fix: str.replace returns a new string (the old result was discarded)
        text_list.append(now.replace("\n", ""))
        now = text  # fix: keep the first line of the new article (was dropped)
    pre = num
if now:
    text_list.append(now.replace("\n", ""))  # fix: flush the final article

print("处理后行数:", len(text_list))

# --- corpus-wide TF-IDF ---
start = time.time()
doc_frequency = defaultdict(int)  # total occurrences of each word in the corpus
doc_appear = defaultdict(int)     # number of documents containing each word
text_all = []                     # tokenized + stop-word-filtered documents
for text in text_list:
    tokens = [w for w in text.split(" ") if w and is_stop_word(w)]
    for w in tokens:
        doc_frequency[w] += 1
    for w in set(tokens):  # count each word once per document
        doc_appear[w] += 1
    text_all.append(tokens)

# NOTE(review): "TF" here is the corpus-level count divided by the vocabulary
# size (a constant across words), not the classic per-document term frequency;
# rankings are unaffected, but confirm before relying on absolute values.
word_tfidf = {
    w: (doc_frequency[w] / len(doc_frequency))
       * math.log(len(text_list) / (doc_appear[w] + 1))  # +1 smoothing in IDF
    for w in doc_frequency
}
end = time.time()
print("计算tf-idf用时：", end - start)


# 降序排列并
start = time.time() # 计算开始时间
key_word_list = []
for i, text in enumerate(text_all): # 对dicts循环
    text_dir = {}
    for word in text:
        text_dir[word] = word_tfidf[word]
    # 按照tfdif对textidr排序
    text_dir = sorted(text_dir.items(), key=lambda d: d[1], reverse=True)
    # 排除掉 tfidf值
    for i in range(len(text_dir)):
        text_dir[i] = text_dir[i][0]
    key_word_list.append(text_dir[0:20]) # 取前二十个作为这篇文章的关键词
end = time.time()
print("筛选文章keywords：", end - start) # 计算总用时

# --- one-hot encode each document over the keyword vocabulary ---
start = time.time()
keywords = set()
for doc_keywords in key_word_list:
    keywords.update(doc_keywords)
keywords = sorted(keywords)  # sorted for a reproducible column order (set order varies per run)
# Precompute word -> column once. The old `keywords.index(word)` (plus the
# redundant `word in keywords` test — every keyword is in the vocabulary by
# construction) was an O(V) scan inside the double loop.
column = {word: j for j, word in enumerate(keywords)}
onehot_vectors = np.zeros((len(key_word_list), len(keywords)), dtype=np.int64)
for i, doc_words in enumerate(key_word_list):
    for word in doc_words:
        onehot_vectors[i][column[word]] = 1
end = time.time()
print("构建每个文章向量用时：", end - start)

# --- pairwise cosine similarity between all documents ---
start = time.time()
len_docs = len(key_word_list)  # total number of documents
similarity_metrix = np.zeros((len_docs, len_docs))
for i in range(len_docs):
    print(i)  # progress indicator
    similarity_metrix[i, i] = cal_similarity(onehot_vectors[i], onehot_vectors[i])
    # Cosine similarity is symmetric, so compute only the upper triangle
    # and mirror it — halves the work of the old full i×j double loop.
    for j in range(i + 1, len_docs):
        sim = cal_similarity(onehot_vectors[i], onehot_vectors[j])
        similarity_metrix[i, j] = sim
        similarity_metrix[j, i] = sim
end = time.time()
print(similarity_metrix)
print("计算两两相似度用时：", end - start)
