"""
    data.py

    1. 数据预处理，变为 dataframe 格式
    2. 基于时间和搜索量进行分析
    3. 额外的一些辅助功能


    事件和搜索量
    词云
    主题建模
    情感分析
    目的：在以后需要处理某件事情的时候，可以预测大概在哪个时间点民众情绪和关注度将达到顶点，面对负面新闻，可以通过什么其他事件进行引流，降低
    民众对该事件关注度，从而减少负面影响。

"""
from collections import Counter
from statsmodels.tsa.seasonal import seasonal_decompose
from wordcloud import WordCloud
import pandas as pd
import jieba
import matplotlib.pyplot as plt
from scipy.signal import find_peaks


def jieba_cut(text):
    """
    Tokenize text with jieba, keeping only tokens longer than one character.

    :param text: raw text to tokenize
    :return: list of multi-character tokens
    """
    tokens = jieba.cut(text)
    # Single-character tokens are mostly stop-word noise in Chinese text.
    return list(filter(lambda w: len(w) > 1, tokens))


def read_data(file):
    """
    Read the raw Excel export and preprocess it into an analysis-ready frame.

    :param file: path to the Excel file
    :return: DataFrame with columns [date, title, words_list, searchCount, CR]
    """
    data = pd.read_excel(file)

    # Drop rows missing any of the essential fields.
    data = data.dropna(subset=['title', 'date', 'searchCount'])

    # Normalize timestamps to midnight (00:00:00) so rows group cleanly by day.
    data['date'] = pd.to_datetime(data['date']).dt.normalize()

    data['searchCount'] = pd.to_numeric(data['searchCount'])
    data['title'] = data['title'].astype(str)

    # Tokenize each title (jieba_cut drops single-character tokens).
    data['words_list'] = data['title'].apply(jieba_cut)

    # CR may be absent or sparse depending on the export; default it to 0
    # instead of raising a KeyError on files without that column.
    if 'CR' not in data.columns:
        data['CR'] = 0
    data['CR'] = data['CR'].fillna(0).astype(int)

    return data[['date', 'title', 'words_list', 'searchCount', 'CR']]


def plot_peak(data, is_plot=False):
    """
    Aggregate search volume per day and locate the peak days.

    :param data: DataFrame with at least ['date', 'title', 'searchCount']
    :param is_plot: when True, save a peaks plot to images/data_peak.png
    :return: ([(peak date, titles on that day, daily search total)], [peak dates]);
             dates are 'yyyy-mm-dd' strings
    """
    data_time_series = data.set_index('date')
    daily_totals = data_time_series['searchCount'].resample('D').sum()

    peaks, _ = find_peaks(daily_totals, prominence=1)

    if is_plot:
        plt.figure(figsize=(12, 6))
        plt.plot(daily_totals, label='Search Count')
        # Mark each detected peak with an 'x'.
        plt.plot(daily_totals.index[peaks], daily_totals.iloc[peaks], 'x', label='Peaks')
        plt.title('Search Volume Peaks Over Time')
        plt.xlabel('Date')
        plt.ylabel('Search Volume')
        plt.legend()
        plt.savefig('images/data_peak.png')

    # Keep only the date part (yyyy-mm-dd) for downstream lookups.
    peak_dates = [str(d).split(' ')[0] for d in daily_totals.index[peaks]]

    peak_titles = []
    for date in peak_dates:
        # Use a boolean mask rather than .loc[date]: when a peak date has
        # exactly one row, .loc collapses to a scalar string and iterating it
        # would yield individual characters instead of the title.
        titles = data_time_series.loc[data_time_series.index == date, 'title'].tolist()
        peak_titles.append((date, titles, daily_totals.loc[date]))

    return peak_titles, peak_dates


def plot_time_series(data):
    """
    Decompose daily search volume into trend, seasonality and residuals,
    and save the decomposition plot to images/data_time_series.png.

    :param data: DataFrame with ['date', 'searchCount'] columns
    :return: None
    """
    # Aggregate search volume by calendar day.
    daily_totals = data.set_index('date')['searchCount'].resample('D').sum()

    # Additive model: observed = trend + seasonal + residual.
    decomposition = seasonal_decompose(daily_totals, model='additive')

    components = [
        (decomposition.trend, 'Trend'),
        (decomposition.seasonal, 'Seasonality'),
        (decomposition.resid, 'Residuals'),
    ]

    plt.figure(figsize=(14, 7))
    for position, (series, label) in enumerate(components, start=1):
        plt.subplot(3, 1, position)
        plt.plot(series, label=label)
        plt.legend(loc='best')
    plt.tight_layout()
    plt.savefig('images/data_time_series.png')


def word_cloud(data, peak_dates, font_path='/Library/Fonts/Arial Unicode.ttf'):
    """
    Generate and save a word-cloud image for each peak date.

    :param data: DataFrame with ['date', 'words_list'] columns
    :param peak_dates: iterable of 'yyyy-mm-dd' date strings
    :param font_path: TTF font used for CJK rendering; the default is the
                      macOS Arial Unicode path — override it on other platforms
    :return: None
    """
    words_by_date = {}
    for date in peak_dates:
        entries = data[data['date'] == date]
        # Flatten the per-title token lists; a nested comprehension avoids
        # the quadratic cost of sum(list_of_lists, []).
        words_by_date[date] = [w for words in entries['words_list'] for w in words]

    for date, words in words_by_date.items():
        print(date, Counter(words))

    # One word-cloud image per peak date.
    for date, words in words_by_date.items():
        frequencies = Counter(words)
        cloud = WordCloud(width=800, height=800, background_color='white',
                          font_path=font_path).generate_from_frequencies(frequencies)

        plt.figure(figsize=(8, 8))
        plt.imshow(cloud, interpolation='bilinear')
        plt.axis('off')

        # Save each cloud to its own file, keyed by date.
        plt.savefig(f'images/wb_{date}.png')
        plt.close()


def titles_by_date(data, dates):
    """
    For each given date, return the top-3 titles by search volume that day.

    :param data: DataFrame with ['date', 'title', 'searchCount'] columns
    :param dates: list of date strings, formatted yyyy-mm-dd
    :return: list of (date, titles) tuples
    """
    results = []
    for date in dates:
        day_rows = data[data['date'] == date]
        ranked = day_rows.sort_values(by='searchCount', ascending=False)
        top_titles = ranked['title'].head(3).tolist()
        results.append((date, top_titles))

    return results


if __name__ == '__main__':
    # Widen pandas display limits so debug prints show full frames.
    pd.set_option('display.max_columns', None)  # show all columns
    pd.set_option('display.max_rows', None)  # show all rows

    data = read_data('res/Weibo_2020Coron.xlsx')
    # print(data.head(10))

    # Detect daily search-volume peaks and save the peaks plot.
    titles, peak_dates = plot_peak(data, is_plot=True)
    # print(peak_dates)
    plot_time_series(data)
    # word_cloud(data, peak_dates)

    # dates_test = ['2020-01-23', '2020-01-24', '2020-01-25', '2020-01-26', '2020-01-27']
    # titles = titles_by_date(data, peak_dates)
    # for x in titles:
    #     print(x)