from collections import defaultdict
import math
# import torch
import operator

def tf_idf_cal(list_words):
    """Compute a TF-IDF score for every distinct token in *list_words*.

    TF is each token's share of the total token count.  The "document"
    count used for IDF is the number of tokens in *list_words* that
    contain the term as a substring (string ``in`` test).

    NOTE(review): the substring test looks as if this was adapted from a
    version where *list_words* held per-document token lists; with a flat
    token list it does substring matching (e.g. "cat" counts inside
    "catalog"). Behavior is preserved as-is — confirm intent with callers.

    :param list_words: list of token strings
    :return: dict mapping each distinct token to its tf * idf value
    """
    # Occurrence count of every distinct token (missing keys default to 0).
    counts = defaultdict(int)
    for token in list_words:
        counts[token] += 1

    # TF: a token's share of the full token stream.
    total = sum(counts.values())
    tf = {term: freq / total for term, freq in counts.items()}

    # "Document" count per term: tokens that contain it as a substring.
    n_docs = len(list_words)
    containing = {
        term: sum(1 for other in list_words if term in other)
        for term in counts
    }
    # Smoothed IDF: log(N / (df + 1)).
    idf = {term: math.log(n_docs / (containing[term] + 1)) for term in counts}

    # Final score is the elementwise product of TF and IDF.
    return {term: tf[term] * idf[term] for term in counts}


# NOTE(review): dead code below, kept for reference — depends on torch,
# whose import is also commented out at the top of the file. It contains
# loop-variable shadowing bugs (e.g. `for a, vec ...: for i, a ...`) and
# should be fixed before being revived.
# def turn_Vector(text_a, text_b):
#
#     word_tf_idf_A = tf_idf_cal(text_a)
#     word_tf_idf_B = tf_idf_cal(text_b)
#
#     text_a.extend(text_b)
#     allWords = list(set(text_a))
#
#     idx2token = dict()
#     for i, token in enumerate(allWords):
#         idx2token[i] = token
#
#     vec_A = torch.zeros((1, len(idx2token)))
#     vec_B = torch.zeros((1, len(idx2token)))
#
#     for a, vec in enumerate(vec_A):
#         for i, a in enumerate(vec):
#             token = idx2token[i]
#             if token in word_tf_idf_A.keys():
#                 vec[i] = word_tf_idf_A[token]
#
#     for i, vec in enumerate(vec_B):
#         for i, a in enumerate(vec):
#             token = idx2token[i]
#             if token in word_tf_idf_B.keys():
#                 vec[i] = word_tf_idf_B[token]
#
#     return vec_A, vec_B#