#!/usr/bin/python
# -*- coding: UTF-8 -*-
# author: Gao Xiaolin
# summary: keyword extraction from a news corpus using the TF-IDF algorithm

import jieba
import re
import os
import collections

import math
import operator

# Load the stop-word list: one stop word per line.
def get_stop_word(path):
    """Read a stop-word file (UTF-8, one word per line) and return the words.

    Args:
        path: path to the stop-word file.

    Returns:
        List of stop words in file order (blank lines yield empty strings,
        matching the original behavior).
    """
    stop_words = []
    with open(path, encoding='utf-8') as f:
        for line in f:
            # rstrip('\r\n') also removes the carriage return left behind by
            # Windows-style (CRLF) files; the original replace('\n', '') kept
            # the '\r', so those stop words never matched jieba's tokens.
            stop_words.append(line.rstrip('\r\n'))
    return stop_words

def get_file_list(path):
    """Return the full paths of all entries directly under *path*.

    Uses os.path.join, so the result is correct whether or not *path* ends
    with a separator (the original string concatenation silently produced
    broken paths like 'datafile.txt' when the trailing slash was missing).
    """
    return [os.path.join(path, name) for name in os.listdir(path)]

# Build per-document term counts and the corpus-wide vocabulary.
def corpus(filelist, swlist):
    """Tokenize every file in *filelist* and count its terms (stop words removed).

    Args:
        filelist: paths of the corpus documents.
        swlist: stop words to filter out during segmentation.

    Returns:
        document_tf_list: one collections.Counter (word -> raw count) per file.
        all_word_list: every distinct word of the corpus, in first-seen order.
    """
    document_tf_list = []
    seen = set()  # O(1) membership check; the original scanned the list, O(n^2) overall
    all_word_list = []
    for file in filelist:
        _, word_list = get_cut_list(file, swlist)
        document_tf_list.append(collections.Counter(word_list))
        for w in word_list:
            if w not in seen:
                seen.add(w)
                all_word_list.append(w)
    return document_tf_list, all_word_list

# Split a file into sentences, segment each with jieba, and drop stop words.
def get_cut_list(path, sw_list):
    """Tokenize the UTF-8 text file at *path*.

    Args:
        path: text file to read.
        sw_list: iterable of stop words to filter out.

    Returns:
        cut_sentence_list: list of sentences, each a list of kept words.
        word_list: all kept words of the document, flattened, in order.
    """
    stop_words = set(sw_list)  # hoisted: O(1) membership per token instead of O(len(sw_list))
    word_list = []
    cut_sentence_list = []
    with open(path, encoding='utf-8') as f:
        # f.read() replaces the roundabout "".join(f.readlines()); also renamed
        # the local (was `corpus`) so it no longer shadows the sibling function.
        text = f.read().replace('\n', '')
    # Split on both ASCII and full-width Chinese sentence punctuation.
    sentence_delimiters = re.compile(u'[,.!?;，。！？；]')
    sentence_list = [s for s in sentence_delimiters.split(text) if s != ""]
    for sentence in sentence_list:
        kept = [w for w in jieba.cut(sentence) if w not in stop_words]
        cut_sentence_list.append(kept)
        word_list.extend(kept)
    return cut_sentence_list, word_list

# Term frequency of each word within one document.
def get_tf(document):
    """Given a mapping word -> raw count, return word -> relative frequency."""
    total = sum(document.values())
    # An empty mapping yields an empty dict without ever dividing by zero.
    return {word: count / total for word, count in document.items()}

def wordinfilecount(word, corpuslist):  # document frequency of *word*
    """Count how many documents in *corpuslist* contain *word*.

    Args:
        word: the token to look for.
        corpuslist: one entry per document; each document is a list of
            sentences, each sentence a list of words (the structure produced
            by get_cut_list).

    Returns:
        Number of documents in which *word* occurs at least once.

    BUG FIX: the original incremented once per matching *sentence*, not per
    document, inflating the document frequency used by the IDF term.
    """
    count = 0
    for document in corpuslist:
        # any() stops at the first sentence containing the word, so each
        # document contributes at most 1 to the count.
        if any(word in sentence for sentence in document):
            count += 1
    return count


def tf_idf(wordlis, filelist, corpuslist):  # compute TF-IDF, return sorted pairs
    """Compute TF-IDF for every distinct word of one document.

    Args:
        wordlis: flattened word list of the document being scored.
        filelist: all corpus files; only its length (corpus size) is used.
        corpuslist: per-document sentence/word structure, consulted for the
            document frequency of each word.

    Returns:
        List of (word, tfidf) pairs sorted by score, highest first.

    BUG FIX: the original called an undefined helper `freqword` (NameError on
    any invocation); collections.Counter provides exactly the word -> count
    mapping that was intended. Dead locals (tf = 0, idf = 0, outlis) removed.
    """
    dic = collections.Counter(wordlis)
    outdic = {}
    for i in set(wordlis):
        tf = dic[str(i)] / len(wordlis)  # TF: occurrences / total words in the document
        # IDF: log(total number of documents / (documents containing the word + 1))
        idf = math.log(len(filelist) / (wordinfilecount(str(i), corpuslist) + 1))
        outdic[str(i)] = tf * idf
    # Sort by score, descending.
    return sorted(outdic.items(), key=operator.itemgetter(1), reverse=True)

def get_keyword_tf_idf(freq_list, top_k=10):
    """Return the top_k highest-TF (word, tf) pairs for each document.

    Args:
        freq_list: one mapping word -> raw count per document
            (e.g. a collections.Counter).
        top_k: how many keywords to keep per document.

    Returns:
        A list with one entry per document; each entry is a list of up to
        top_k (word, tf) pairs sorted by tf, highest first.

    BUG FIX: the original crashed on any non-empty input (`freq.items()[0]`
    is not subscriptable in Python 3), computed the total from the wrong
    expression, ignored top_k, and returned None.
    """
    results = []
    for freq in freq_list:
        total_words = sum(freq.values())  # total tokens, not the size of one item
        if total_words == 0:
            results.append([])
            continue
        tf_pairs = [(word, count / total_words) for word, count in freq.items()]
        tf_pairs.sort(key=operator.itemgetter(1), reverse=True)
        results.append(tf_pairs[:top_k])
    return results



def main():
    """Score and print, for each document under ./data/, its words ranked by
    a TF-IDF-style score (tf * N / df, no log — kept from the original)."""
    swpath = r'./中文停用词表.txt'
    swlist = get_stop_word(swpath)

    # Gather every file in the corpus directory.
    filepath = './data/'
    filelist = get_file_list(filepath)

    # Per-document term counts and the corpus-wide vocabulary.
    document_word_dict, all_word_list = corpus(filelist, swlist)

    # Document frequency: in how many documents does each word appear?
    # Every word in all_word_list occurs in at least one document, so the
    # division below can never hit zero.
    word_doc_dict = {}
    for word in all_word_list:
        word_doc_dict[word] = 0
        for doc in document_word_dict:
            if word in doc:
                word_doc_dict[word] += 1

    document_cnt = len(filelist)
    for document in document_word_dict:
        # BUG FIX: the score dict is now re-created per document; previously it
        # was built once outside the loop, so every print after the first also
        # contained stale scores from earlier documents. Renamed from `tf_idf`,
        # which shadowed the sibling function of the same name.
        scores = {}
        tf = get_tf(document)
        for word, freq in tf.items():
            # NOTE(review): tf * N / df without the usual log() — preserved
            # from the original ranking formula; confirm before "fixing".
            scores[word] = freq * document_cnt / word_doc_dict[word]

        print(sorted(scores.items(), key=lambda kv: (kv[1], kv[0]), reverse=True))


# Run keyword extraction only when executed as a script, not on import.
if __name__ == '__main__':
    main()
