"""
    modeling.py
    使用主题建模的方法对热搜内容进行分析
"""

import jieba
from gensim import corpora, models
from matplotlib import pyplot as plt
from data import read_data


def lda_modeling(data):
    """
    Analyze hot-search content with per-day LDA topic models.

    For each date, concatenate that day's token lists into one document,
    fit a 3-topic LDA on it, and print the top 3 keywords of each topic.

    :param data: DataFrame with 'date' and 'words_list' (list-of-tokens) columns
    :return: dict mapping each date to its list of (topic_id, keywords) tuples,
             e.g. Date: 2020-01-01 00:00:00
                  (0, '0.019*"现场" + 0.019*"跨年" + 0.015*"2020" + 0.010*"回应" + 0.010*"邓紫棋"')
                  (1, '0.006*"跨年" + 0.006*"现场" + 0.006*"重庆" + 0.006*"合唱" + 0.006*"2020"')
                  (2, '0.006*"跨年" + 0.006*"现场" + 0.006*"肖战" + 0.006*"王一博" + 0.006*"2020"')
    """
    # sum() over the grouped lists concatenates every title's tokens
    # for a given day into a single document.
    grouped_titles_cut = data.groupby('date')['words_list'].apply(sum)

    topics_by_date = {}
    for date, titles in grouped_titles_cut.items():
        if titles:  # skip days whose concatenated token list is empty
            dictionary = corpora.Dictionary([titles])
            corpus = [dictionary.doc2bow(titles)]

            lda = models.LdaModel(corpus, num_topics=3, id2word=dictionary, passes=15)

            topics_by_date[date] = lda.print_topics(num_words=3)

    for date, topics in topics_by_date.items():
        print(f"Date: {date}")
        for topic in topics:
            print(topic)
        print("\n")

    return topics_by_date


def lda_classification(data):
    """
    Classify non-epidemic hot searches into 5 LDA topics (ids 0-4).

    Trains an LDA model on rows where CR == 0 and writes each row's
    dominant topic id back into data['topic'] in place.

    :param data: DataFrame with 'CR' and 'words_list' columns
    :return: the trained gensim LdaModel (data['topic'] has been modified)
    """
    # .copy() so the later column assignment targets an independent frame
    # instead of a view of `data` (avoids SettingWithCopyWarning and
    # unreliable chained-assignment writes).
    c_data = data[data['CR'] == 0].copy()

    print(len(c_data))

    dictionary = corpora.Dictionary(c_data['words_list'])
    corpus = [dictionary.doc2bow(text) for text in c_data['words_list']]

    lda = models.LdaModel(corpus, num_topics=5, id2word=dictionary, passes=15)

    # Assign each document the topic with the highest probability.
    topics = [max(lda.get_document_topics(bow), key=lambda x: x[1])[0] for bow in corpus]
    c_data['topic'] = topics
    # Write the predictions back into the original frame by index.
    data.loc[c_data.index, 'topic'] = c_data['topic']

    return lda


# Epidemic-related keywords used to flag a title as COVID-related.
# A frozenset gives O(1) membership tests and drops the duplicate
# entries the original list contained ('疫苗', '隔离', '检测', ...).
coron_words = frozenset([
    '疫情', '防护', '检测', '病例', '核酸', '口罩', '隔离', '复工', '确诊', '传播',
    '疫苗', '治疗', '武汉', '封城', '医护', '患者', '症状', '防控', '抗疫', '疾控',
    '病毒', '疫区',
])


def title_prediction(lda, title):
    """
    Predict the topic of a single title.

    :param lda: trained gensim LdaModel (its id2word dictionary is reused)
    :param title: title string to classify
    :return: topic id (5 = epidemic-related, 0-4 = other LDA topics)
    """
    # Tokenize once and reuse the tokens for both the keyword check and
    # the LDA query (the original cut the title twice).
    words = jieba.lcut(title)

    # Any epidemic keyword immediately classifies the title as topic 5.
    if any(word in coron_words for word in words):
        return 5

    bow = lda.id2word.doc2bow(words)
    topic = max(lda.get_document_topics(bow), key=lambda x: x[1])[0]

    return topic


def plot_trend_after_classification(data, topic=5):
    """
    Plot the daily search-count trend for a given topic.

    Core goal of the project: given a topic id, draw that topic's daily
    search-count curve so its duration can be analyzed.

    :param data: DataFrame with 'date', 'searchCount', 'CR' and 'topic' columns
    :param topic: topic id; 5 selects epidemic rows (CR == 1),
                  0-4 select rows whose LDA topic matches
    :return: None (the figure is saved to images/topic_prediction.png)
    """
    if topic == 5:
        print('疫情图')
        mask = data['CR'] == 1
    else:
        print('非疫情图')
        mask = data['topic'] == topic
    # Same aggregation for both branches: total search count per day.
    p_data = data[mask].groupby('date')['searchCount'].sum()

    plt.figure(figsize=(12, 6))
    p_data.plot(kind='line')
    plt.title(f'Daily Search Count for Topic {topic}')
    plt.xlabel('Date')
    plt.ylabel('Search Count')
    plt.grid(True)
    # NOTE(review): every topic overwrites this same file; the original's
    # stray f-prefix (no placeholder) suggests the name was meant to
    # include {topic} — confirm intent before changing the path.
    plt.savefig('images/topic_prediction.png')
    # plt.show()

    return None


if __name__ == '__main__':
    df = read_data('res/Weibo_2020Coron.xlsx')

    # Per-day topic analysis (disabled by default).
    # lda_modeling(df)

    # Train the classifier; this also adds a 'topic' column to df in place.
    lda_model = lda_classification(df)
    # Predict the topic of a new title, then plot that topic's daily trend.
    topic_id = title_prediction(lda_model, '名侦探学院')
    plot_trend_after_classification(df, topic_id)


