#coding=utf-8

#http://www.cnblogs.com/HolyShine/p/6033368.html

import math

class IDF_MODE():
    """Inverse-document-frequency model over whitespace-tokenized documents.

    After train() or load(), the model maps each word to
    IDF(w) = log(N / (df(w) + 1)), where N is the number of training
    documents and df(w) the number of documents containing w.
    """

    def __init__(self):
        # Per-instance word -> IDF weight mapping.  This used to be a
        # mutable CLASS attribute, so every instance shared (and
        # clobbered) the same dict after train()/load().
        self.IDF_for_all_kinds_of_words = {}

    def __contains__(self, item):
        # True when `item` has an IDF weight in this model.
        return item in self.IDF_for_all_kinds_of_words

    def __getitem__(self, item):
        # IDF weight for `item`; raises KeyError for unknown words.
        return self.IDF_for_all_kinds_of_words[item]

    def train(self, all_docs):
        """Fit IDF weights from an iterable of whitespace-tokenized docs.

        Each element of `all_docs` is one document as a single string whose
        tokens are separated by whitespace.
        """
        doc_number = 0
        document_frequency = {}  # word -> number of docs containing it
        for doc in all_docs:
            doc_number += 1
            # set() so a word repeated inside one doc counts only once
            for word in set(doc.split()):
                document_frequency[word] = document_frequency.get(word, 0) + 1

        # +1 smoothing in the denominator keeps log() finite even for a
        # word that appears in every document.
        self.IDF_for_all_kinds_of_words = {
            word: math.log(doc_number / (count + 1))
            for word, count in document_frequency.items()
        }

    def save(self, path):
        """Write the model to `path` as UTF-8 `word,idf` lines."""
        with open(path, "w", encoding="utf-8") as f:
            for word, idf in self.IDF_for_all_kinds_of_words.items():
                f.write(word + "," + str(idf) + "\n")

    def load(self, path):
        """Load a model written by save(); malformed lines are skipped.

        Reads with an explicit UTF-8 encoding to match save() — the
        original open() used the platform default and could mis-decode.
        """
        self.IDF_for_all_kinds_of_words = {}
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                parts = line.split(",")
                if len(parts) != 2:
                    continue
                self.IDF_for_all_kinds_of_words[parts[0]] = float(parts[1])

    def TF_IDF_top_word(self, doc, top=5):
        """Return up to `top` words of `doc` ranked by TF * IDF, descending.

        `doc` is one whitespace-tokenized string.  Words missing from the
        model contribute IDF 0.  An empty document returns [] (the
        original raised ZeroDivisionError on len(words) == 0).
        """
        words = doc.split()
        if not words:
            return []

        term_count = {}
        for word in words:
            term_count[word] = term_count.get(word, 0) + 1

        total = len(words)
        scores = {
            word: (count / total) * self.IDF_for_all_kinds_of_words.get(word, 0)
            for word, count in term_count.items()
        }

        ranked = sorted(scores, key=scores.get, reverse=True)
        return ranked[:top]



if __name__ == "__main__":
    def readdoc(path):
        """Read one corpus file; each line is one whitespace-tokenized doc.

        Opens with an explicit UTF-8 encoding for consistency with the
        doc_sample read below — the original relied on the platform
        default encoding and leaked the file handle on error.
        """
        with open(path, "r", encoding="utf-8") as f:
            return f.readlines()

    docs_list_with_word_seperated_in_line = []
    for category in ("Auto", "Culture", "Economy", "Medicine", "Military", "Sports"):
        docs_list_with_word_seperated_in_line.extend(
            readdoc("Data/dataset/step1/" + category + ".txt"))

    idf_mode = IDF_MODE()

    # Flip to True to retrain from the corpus instead of loading a
    # previously saved model.
    train_enable = False

    if train_enable:
        idf_mode.train(docs_list_with_word_seperated_in_line)
        idf_mode.save("output/model/tf_idf.model")
    else:
        idf_mode.load("output/model/news-163-idf.model")

    # Spot-check a few docs; index 4001 assumes the combined corpus is
    # that large — NOTE(review): confirm against the dataset.
    for idx in (0, 1, 2, 4001):
        print(" ".join(idf_mode.TF_IDF_top_word(
            docs_list_with_word_seperated_in_line[idx], 5)))

    # Segment a raw document with jieba, then compare our TF-IDF keyword
    # ranking against jieba's own extract_tags.
    import jieba.posseg as pseg
    with open("output/doc_sample.txt", encoding="utf-8") as f:
        txt = f.read()
    doc = " ".join(w.word for w in pseg.cut(txt))
    print(" ".join(idf_mode.TF_IDF_top_word(doc, 20)))

    import jieba.analyse
    tags = jieba.analyse.extract_tags(doc)
    print(",".join(tags))
