# -*- coding: utf-8 -*-
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer, TfidfVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
import pandas as pd
import matplotlib.pyplot as plt

import Recommender.General as Genaral

# ---Find the optimal number of topics---
# Scans topic counts in range(start_topics, end_topics, step), training one LDA
# model per count and plotting training-set perplexity so the elbow can be read off.
def Best_Topic_via_Perplexity(segments_list, step, start_topics, end_topics):
    """Scan candidate topic counts and plot training perplexity for each.

    Trains one LDA model per topic count in
    range(start_topics, end_topics, step) on the TF matrix built from
    segments_list, records each model's perplexity on the training data,
    plots the curve, and returns it.

    :param segments_list: documents, each a list of word tokens
    :param step: increment between candidate topic counts
    :param start_topics: first topic count to try (inclusive)
    :param end_topics: bound on topic counts (exclusive)
    :return: one-row DataFrame; columns are topic counts, values perplexities
    """
    # Join tokens with spaces — the vectorizer expects whitespace-separated text.
    string_list = [" ".join(segments) for segments in segments_list]

    # The fitted vectorizer is not needed here, only the count matrix.
    _, tf_byDocument = TfTransform(string_list)

    perplex = dict()
    for n_topics in range(start_topics, end_topics, step):
        lda = Train_LDAModel(tf_byDocument, n_topics)
        # Perplexity on the training set itself; it tends to keep falling as
        # topics increase, so look for the elbow rather than the minimum.
        perplexity = lda.perplexity(tf_byDocument)
        perplex[n_topics] = [perplexity]
        print(n_topics, perplexity)

    perplex_df = pd.DataFrame(perplex)
    plt.figure()
    plt.plot(perplex_df.columns.values, perplex_df.iloc[0].values, '#007A99')
    plt.xticks(perplex_df.columns.values)
    plt.ylabel('train Perplexity')
    plt.show()
    # plt.savefig('lda_topic_perplexity.png', bbox_inches='tight', pad_inches=0.1)
    return perplex_df


# Convert pre-tokenized (space-joined) documents into a term-frequency (TF) count matrix
def TfTransform(news_cut_list):
    """Vectorize space-joined documents into a raw term-count matrix.

    Vectorizer options such as max_df=0.95, min_df=2, max_features=n_features
    (keep words seen at least twice, top 2000 by count) are deliberately left
    at their defaults for now.

    :param news_cut_list: documents as whitespace-separated token strings
    :return: (fitted CountVectorizer, sparse document-term count matrix)
    """
    vectorizer = CountVectorizer()
    counts = vectorizer.fit_transform(news_cut_list)
    return vectorizer, counts


# Convert pre-tokenized (space-joined) documents into a TF-IDF weighted matrix
def TfIdfTransform(news_cut_list):
    """Vectorize space-joined documents into a TF-IDF weighted matrix.

    :param news_cut_list: documents as whitespace-separated token strings
    :return: (fitted TfidfVectorizer, sparse document-term TF-IDF matrix)
    """
    vectorizer = TfidfVectorizer()
    weighted = vectorizer.fit_transform(news_cut_list)
    return vectorizer, weighted


# ---input is Segment List---
def Train_LDAMOdel_Segments(segments_list, n_topic, print=False):
    """Train an LDA model from tokenized documents.

    Each document (a list of tokens) is joined into one space-separated
    string — the input format the vectorizer expects — then delegated to
    Train_LDAModel_String.

    NOTE(review): the `print` parameter shadows the builtin; the name is
    kept because existing callers pass it by keyword.
    """
    joined = [" ".join(tokens) for tokens in segments_list]
    return Train_LDAModel_String(joined, n_topic, print)


#
def Train_LDAModel_String(string_list, n_topic, print=False):
    """Train an LDA model from space-joined document strings.

    :param string_list: documents as whitespace-separated token strings
    :param n_topic: number of LDA topics
    :param print: when truthy, dump sorted topic-term tables via
        Print_Topic_Term (shadows the builtin; name kept for callers)
    :return: fitted LatentDirichletAllocation model
    """
    tf_vectorizer, tf_byDocument = TfTransform(string_list)
    lda = Train_LDAModel(tf_byDocument, n_topic)

    if print:
        # get_feature_names() was removed in scikit-learn 1.2; prefer
        # get_feature_names_out(), falling back only on old versions.
        if hasattr(tf_vectorizer, "get_feature_names_out"):
            term = tf_vectorizer.get_feature_names_out()  # vocabulary
        else:
            term = tf_vectorizer.get_feature_names()
        topic_term = lda.components_  # topic-term distribution
        Print_Topic_Term(topic_term, term)

    return lda


# ---input is a document-term count matrix (output of TfTransform)---
def Train_LDAModel(tf_byDocument, n_topic, output_dir="D:/Data/LDA"):
    """Fit an LDA model on a document-term matrix and dump distributions to CSV.

    :param tf_byDocument: sparse document-term count matrix
    :param n_topic: number of topics
    :param output_dir: directory for the CSV dumps (must already exist);
        default preserves the original hard-coded path
    :return: fitted LatentDirichletAllocation model
    """
    print("Train LDA Model")

    # Define the LDA model; random_state pinned for reproducible fits.
    lda = LatentDirichletAllocation(n_components=n_topic,
                                    #max_iter=50,
                                    learning_method='batch',
                                    learning_offset=50.,
                                    random_state=0)

    doc_topic = lda.fit_transform(tf_byDocument)  # document-topic distribution
    topic_term = lda.components_  # topic-term distribution

    # Persist both distributions for offline inspection.
    pd.DataFrame(doc_topic).to_csv(output_dir + "/document_topic.csv")
    # Transposed so rows are terms and columns are topics.
    pd.DataFrame(topic_term).transpose().to_csv(output_dir + "/topic_term.csv")

    return lda


def Test_LDAModel(segment, lda):
    """Placeholder: evaluate a fitted LDA model on a held-out segment. Not implemented."""
    pass


def Print_Topic_Term(topic_term, term, output_dir="D:/Data/LDA"):
    """Dump each topic's term weights to CSV, sorted by weight.

    For every topic writes two files into output_dir:
    ``topic_term_sorted_<i>.csv`` (full table sorted by topic i's weight)
    and ``topic_term_and_frequency_<i>.csv`` (just Word/Frequency columns).

    :param topic_term: topic-term weight matrix (n_topics x n_terms)
    :param term: vocabulary, aligned with the columns of topic_term
    :param output_dir: directory for the CSVs (must already exist);
        default preserves the original hard-coded path
    """
    print("Print_Topic_Term")

    # Transpose so rows are terms and the integer columns 0..n-1 are topics.
    df = pd.DataFrame(topic_term).transpose()
    n_topics = df.columns.size

    term_df = pd.DataFrame(term)
    term_df.rename(columns={0: "Term"}, inplace=True)  # avoid clashing with topic column 0

    df = pd.concat((df, term_df), axis=1)
    print(df.head())

    for topic_idx in range(n_topics):
        print("Sorted by", topic_idx)

        # Re-sort the whole table by this topic's weight, descending.
        df_sorted = df.sort_values(by=topic_idx, ascending=False)
        df_sorted.reset_index(inplace=True)
        df_sorted.to_csv(output_dir + "/topic_term_sorted_" + str(topic_idx) + ".csv")

        # Vectorized column selection replaces the original per-row loop.
        df_topics = df_sorted[["Term", topic_idx]].copy()
        df_topics.columns = ["Word", "Frequency"]
        df_topics.to_csv(output_dir + "/topic_term_and_frequency_" + str(topic_idx) + ".csv")


# Script entry: load a pre-segmented corpus and train the final LDA model.
# NOTE(review): wildcard import mid-file hides where Config comes from;
# consider a single explicit `from Core.Config import Config` at the top.
from Core.Config import *
config = Config()
database = config.DataBase()
realtime = config.RealTime()


# Load up to 10000 pre-segmented documents from the "Corpus5" corpus.
#segmentsList = Genaral.Load_Segments(database, "Corpus5", 100)
segmentsList = Genaral.Load_Segments(database, "Corpus5", 10000)

#perplex_df = LDAModel.Best_Topic_via_Perplexity(segmentsList, step=100, start_topics=100, end_topics=1000)  # suggestion: cross-check with both tf and tfidf
# ---200 topics found best for Corpus5 with 10000 documents---
n_topics = 200
Train_LDAMOdel_Segments(segmentsList, n_topics, print=True)
