"""
    attention.py
    计算事件的关注度得分

    目的：可以预测某个事件的持续性（话题追踪技术）
"""
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from data import read_data
import numpy as np
import pandas as pd


def attention_score(data):
    """
    Compute an attention score for each event and rank events by it.

    The score blends three signals:
      * searchCount - log-scaled search volume
      * persistance - number of titles whose TF-IDF cosine similarity to this
        title exceeds the threshold (includes the title itself)
      * duration    - span in days between the oldest and newest similar title

    :param data: DataFrame with at least 'words_list' (list of tokens),
                 'date', 'searchCount' and 'title' columns; mutated in place
                 (adds 'title_cut', 'persistance', 'duration', 'attention')
    :return: (data sorted by attention descending, data with the new columns)
    """
    data['title_cut'] = data['words_list'].apply(lambda x: ' '.join(x))

    tfidf = TfidfVectorizer()
    tfidf_matrix = tfidf.fit_transform(data['title_cut'])

    similarity_matrix = cosine_similarity(tfidf_matrix)
    similarity_thres = 0.5

    # Persistence proxy: how many titles each title resembles above threshold.
    data['persistance'] = np.sum(similarity_matrix > similarity_thres, axis=1)

    # For each title, duration = days between the oldest and newest similar
    # title.  Pre-create the column so every row has a defined value (0)
    # instead of NaN when no date span can be computed.
    data['duration'] = 0
    duration_col = data.columns.get_loc('duration')
    for i in range(len(data)):
        similar_idx = np.where(similarity_matrix[i] > similarity_thres)[0]
        similar_dates = data.iloc[similar_idx]['date']
        if not similar_dates.empty:
            min_date = pd.to_datetime(similar_dates.min())
            max_date = pd.to_datetime(similar_dates.max())
            # Purely positional write: the original `data.loc[i, 'duration']`
            # is label-based and misassigns (or appends rows) whenever the
            # index is not the default 0..n-1.
            data.iloc[i, duration_col] = (max_date - min_date).days

    # attention = w1 * log(searchCount) + w2 * persistance/10 + w3 * duration/10
    w1, w2, w3 = 0.3, 0.4, 0.3
    # NOTE(review): np.log yields -inf for searchCount == 0 — confirm that
    # search counts are always >= 1 in the source data.
    data['attention'] = (w1 * np.log(data['searchCount'])
                         + w2 * data['persistance'] * 0.1
                         + w3 * data['duration'] * 0.1)

    sorted_data = data.sort_values(by='attention', ascending=False)
    print(sorted_data[['date', 'title', 'persistance', 'attention', 'duration']].head(100))

    return sorted_data, data


def duration_prediction(data, title):
    """
    Find the event in ``data`` whose title is most similar to ``title``.

    Fits a TF-IDF model on the corpus titles, projects the query title into
    the same vocabulary, and returns the best cosine-similarity match.

    :param data: DataFrame with a 'words_list' column (pre-tokenized titles);
                 mutated in place (adds a 'title_cut' column)
    :param title: raw (un-tokenized) title string to look up
    :return: positional index (int) of the most similar row in ``data``
    """
    data['title_cut'] = data['words_list'].apply(lambda x: ' '.join(x))

    tfidf = TfidfVectorizer()
    tfidf_matrix = tfidf.fit_transform(data['title_cut'])

    # Vectorize the query title with the *fitted* vocabulary so it lives in
    # the same TF-IDF space as the corpus.
    processed_text = ' '.join(jieba.cut(title))
    text_vector = tfidf.transform([processed_text])

    cosine_similarities = cosine_similarity(text_vector, tfidf_matrix)
    # np.argmax returns np.int64; coerce to a plain int for callers.
    return int(np.argmax(cosine_similarities))


if __name__ == '__main__':
    df = read_data('res/Weibo_2020Coron.xlsx')

    # Predict how long a given event will last: locate its closest match in
    # the corpus, then look up the duration computed by attention_score.
    event_idx = duration_prediction(df, '王一博跳水很开心')
    ranked, scored = attention_score(df)
    print(f'预测事件持续时间：{scored.iloc[event_idx]["duration"]}')

    # Attention ranking (top 100) is already printed inside attention_score;
    # `ranked` holds the sorted DataFrame if further inspection is needed.



