import math
from collections import Counter

import numpy as np
import pandas as pd

from cosine import Cosine


def lower_case():
    """Read the reviews from data2.csv, lowercase and tokenize them.

    Returns:
        total: flat list of every token across all reviews.
        total_list: list of token lists, one per review (document).
    """
    location = r'data2.csv'
    df = pd.read_csv(location)
    total = []       # all tokens from every document, flattened
    total_list = []  # one token list per document
    for review in df['review']:
        tokens = review.lower().split(' ')
        total_list.append(tokens)
        # extend() is O(len(tokens)); the old `total = total + tokens`
        # re-copied the whole accumulated list every iteration (quadratic).
        total.extend(tokens)
    return total, total_list


def stop_words(total):
    """Remove English stopwords from a token list.

    Reads stopwords_en.txt (one word per line) and drops every
    occurrence of those words from ``total``.

    Args:
        total: list of tokens.
    Returns:
        A new list with all stopword occurrences removed (the input
        list is not mutated; callers reassign the return value).
    """
    with open(file='stopwords_en.txt', mode='rt', encoding='utf8', errors='ignore') as f:
        # A set gives O(1) membership tests; the original's repeated
        # list.remove() scans were quadratic.  Blank lines are skipped
        # instead of silently terminating the read loop as before.
        stops = {line.strip() for line in f if line.strip()}
    return [word for word in total if word not in stops]


def remove_char(total):
    """Strip punctuation from each token and join the result into one string.

    Deletes punctuation/newline characters and apostrophes from every
    token, drops tokens of length <= 1 after cleaning, and joins the
    survivors with single spaces.

    Args:
        total: iterable of token strings.
    Returns:
        A single space-separated string of cleaned tokens.
    """
    # Characters deleted from every token.  Includes backslash and ']'
    # (as in the original) plus the apostrophe, which the original
    # removed in a separate pass.
    symbols = ",.!\"#$%&()*+-./:;<=>?@[\\]^_`{|}~\n'"
    table = str.maketrans('', '', symbols)
    cleaned = (w.translate(table) for w in total)
    # Joining once fixes two defects of the old loop: quadratic string
    # concatenation, and the stray double spaces left when a 1-char
    # token was skipped after its separator was already appended.
    return ' '.join(w for w in cleaned if len(w) > 1)



def calculate_tf(terms, total_list, n):
    """Compute the term-frequency matrix tf(t, d) = count(t in d) / len(d).

    Args:
        terms: iterable of unique terms (one column per term).
        total_list: list of tokenized documents (one row per document).
        n: number of documents to process.
    Returns:
        An (n x len(terms)) DataFrame of term frequencies, rounded to 3 dp.
    """
    terms = list(terms)
    arr = np.zeros((n, len(terms)), dtype=float)
    for i in range(n):  # one row per document
        doc = total_list[i]
        length = len(doc)
        if length == 0:
            continue  # empty document: leave the row at 0 instead of dividing by zero
        # Count each token once per document instead of rescanning the
        # whole document for every term (was O(terms * doc_len) per row).
        counts = Counter(doc)
        for j, term in enumerate(terms):
            arr[i][j] = round(counts[term] / length, 3)
    tf = pd.DataFrame(data=arr, dtype=float, columns=terms)
    return tf


def calculate_df(terms, total_list, n):
    """Compute document frequency: for each term, how many of the first
    n documents contain it at least once.

    Args:
        terms: iterable of terms.
        total_list: list of tokenized documents.
        n: number of documents to consider.
    Returns:
        dict mapping str(term) -> number of documents containing it.
    """
    docs = total_list[:n]
    return {
        str(term): sum(1 for doc in docs if term in doc)
        for term in terms
    }


# tf-idf(t, d) = tf(t, d) * log(N / (df(t) + 1))
# i.e. term frequency * log(total documents / (documents containing term + 1))
def calculate_tfidf(tf, df, terms, n):
    """Compute the tf-idf matrix from the tf matrix and df counts.

    Args:
        tf: DataFrame of term frequencies; columns must be in the same
            order as ``terms``.
        df: dict mapping term -> document frequency.
        terms: iterable of terms, aligned with tf's columns.
        n: number of documents.
    Returns:
        An (n x len(terms)) numpy array of tf-idf scores, rounded to 3 dp.
    """
    terms = list(terms)
    arr = np.zeros(shape=(n, len(terms)), dtype=float)
    arr_tf = tf.values  # DataFrame -> numpy array
    # idf depends only on the term, so compute it once per column rather
    # than once per cell.  NOTE: math.log is the NATURAL log (base e),
    # not base 10 as the original comment claimed.
    idf = [math.log(n / (df[term] + 1)) for term in terms]
    for i in range(n):
        for j in range(len(terms)):
            arr[i][j] = round(arr_tf[i][j] * idf[j], 3)
    return arr


# Data preprocessing pipeline.
def pretreatment():
    """Run the full preprocessing pipeline: lowercase, remove stopwords,
    strip punctuation.

    Returns:
        words: flat list of cleaned tokens pooled from every document.
        total_list: per-document cleaned text (one string per document).
    """
    print('预处理开始......')
    print('转小写......')
    total, total_list = lower_case()
    print('去停用词......')
    total = stop_words(total=total)
    print('去字符......')
    new_text = remove_char(total=total)
    words = new_text.split()

    # Apply the same stopword/punctuation cleaning to each document;
    # every entry of total_list becomes a cleaned string.
    for idx, doc in enumerate(total_list):
        filtered = stop_words(total=doc)
        total_list[idx] = remove_char(total=filtered)

    print('预处理完成......')
    return words, total_list


if __name__ == '__main__':
    n = 7  # number of documents to process
    words, total_list = pretreatment()
    # Persist the pooled cleaned tokens for inspection.
    with open('words.csv', mode='w', encoding='utf8', errors='ignore') as f:
        for i in words:
            f.write(i + ' ')
    terms = set(words)  # deduplicate terms with a set

    # After pretreatment() each total_list entry is a cleaned string;
    # re-split it into tokens.  NOTE(review): split(' ') yields empty
    # tokens if remove_char left consecutive spaces — verify upstream.
    total_list2=[]
    for i in range(len(total_list)):
        total_list2.append(str(total_list[i]).split(' '))

    # print(words)
    # print(terms)
    # print(total_list)
    # print(total_list2)

    print('计算term frequency......')
    tf = calculate_tf(terms=terms, total_list=total_list2, n=n)
    tf.to_csv('tf.csv')

    print('计算document frequency......')
    df = calculate_df(terms=terms, total_list=total_list2, n=n)  # dict: term -> document count
    df_pandas = pd.DataFrame(data=df, dtype=float, columns=list(terms), index=[0])
    df_pandas.to_csv('df.csv')

    print('计算tf-idf......')
    tfidf = calculate_tfidf(tf=tf, df=df, terms=terms,n=n)
    pf_tdidf = pd.DataFrame(data=tfidf, dtype=float, columns=list(terms))
    pf_tdidf.to_csv('tfidf.csv')

    # Cosine-similarity recommendation; n_recommendation is the number of
    # documents to recommend per query document.
    consine = Cosine(n_recommendation=3)
    indices, similarities = consine.cal_similarity(tfidf)

    print(similarities)  # debug output
    print(indices)  # debug output

    # Display results: for each of the n documents, print its most
    # similar documents with their similarity scores.
    for i in range(n):
        title = total_list[i]
        index = indices[i]
        similarity = similarities[i]
        print("与句子《{}》相似的文档:".format(title))
        for idx, sim in zip(index, similarity):
            print("\t\t《{}》:{:.5}".format(total_list[idx], sim))
        print()