# -*- coding: utf-8 -*-
# @Time : 2021/5/10 8:58 上午
# @Author : 田巍
# @File ：tfidf_python.py
# @Software : PyCharm
from collections import Counter
import math
import numpy as np

# Toy corpus: two short English sentences.
corpus = [
    'this is the first document',
    'this is the second document'
]

# Tokenize each sentence on single spaces.
word_list = [sentence.split(' ') for sentence in corpus]

# Per-sentence word frequencies, one Counter per sentence.
countlist = [Counter(tokens) for tokens in word_list]
print(countlist[0])


# Formula definitions.
# `count` is one sentence's word-frequency mapping (a Counter);
# count[word] is the word's frequency, sum(count.values()) the sentence length.

def tf(word, count):
    """Term frequency: the word's count divided by the sentence's total token count."""
    total_tokens = sum(count.values())
    return count[word] / total_tokens


# Document frequency: how many sentences contain the word at all.
def n_containing(word, count_list):
    """Return the number of frequency maps in count_list that contain word."""
    hits = 0
    for doc_counts in count_list:
        if word in doc_counts:
            hits += 1
    return hits


# Inverse document frequency.  len(count_list) is the number of sentences;
# the +1 in the denominator guards against division by zero for unseen words.
def idf(word, count_list):
    """Return log(N / (1 + df)) where df is the word's document frequency."""
    doc_freq = sum(1 for doc in count_list if word in doc)
    return math.log(len(count_list) / (1 + doc_freq))


# TF-IDF score is the product of term frequency and inverse document frequency.
def tfidf(word, count, count_list):
    """Return the tf-idf weight of word for one sentence within count_list."""
    return idf(word, count_list) * tf(word, count)


# Build the vocabulary: every distinct word across all sentences.
key = set()
for sentence_counts in countlist:
    key.update(sentence_counts)
key_list = list(key)


# Score every word of every sentence with tf-idf and merge the per-sentence
# scores into one dict.  NOTE(review): when a word occurs in several sentences,
# later sentences overwrite its score — kept as in the original design.
sorted_words_sum = {}
for count in countlist:
    scores = {word: tfidf(word, count, countlist) for word in count}
    sorted_words_sum.update(scores)

# Build one score vector per sentence over the merged vocabulary.
# A word absent from the sentence gets the sentinel value -1.
sorted_words_sum_dict1 = {}
for word in sorted_words_sum:
    if word in word_list[0]:
        sorted_words_sum_dict1[word] = sorted_words_sum[word]
    else:
        sorted_words_sum_dict1[word] = -1
print(sorted_words_sum_dict1)

sorted_words_sum_dict2 = {}
for word in sorted_words_sum:
    if word in word_list[1]:
        sorted_words_sum_dict2[word] = sorted_words_sum[word]
    else:
        sorted_words_sum_dict2[word] = -1
print(sorted_words_sum_dict2)

value1 = list(sorted_words_sum_dict1.values())
print(value1)

# BUG FIX: value2 was previously copied from sorted_words_sum_dict1, so the
# final "similarity" compared sentence 1 against itself.
value2 = list(sorted_words_sum_dict2.values())
print(value2)


# Similarity between the two texts' score vectors.

def sim(list1, list2):
    """Return the cosine similarity of two equal-length numeric vectors.

    BUG FIX: the original divided the dot product by the product of the
    *squared* norms (v·v values), so even identical non-unit vectors did not
    score 1.0.  Cosine similarity divides by the product of the norms:
        cos = (a·b) / (||a|| * ||b||)
    """
    v1 = np.array(list1, dtype=float)
    v2 = np.array(list2, dtype=float)
    dot = v1.dot(v2)
    norm_product = np.sqrt(v1.dot(v1)) * np.sqrt(v2.dot(v2))
    return dot / norm_product


print(f"两行文本文本的相似度为：{sim(value1, value2)}")

# Pipeline recap:
#   1. prepare the corpus
#   2. tokenize each sentence
#   3. count word frequencies with Counter
#   4. define tf / idf / tfidf and score each sentence
