import os
from datetime import datetime

import numpy as np
import pandas as pd
import pymysql
from jinja2 import Template
from setuptools.sandbox import save_path
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import re
from itertools import combinations
import seaborn as sns

from mail import send_email
from rss_oss import upload_file_and_get_url

# Configure matplotlib to render Chinese text correctly
plt.rcParams['font.sans-serif'] = ['SimHei']  # use SimHei (a CJK font) for labels
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly with a CJK font


def clean_keyword_list(dirty_list):
    """
    Extract clean keyword strings from numbered keyword blobs.

    Each item in *dirty_list* is expected to look like
    ``"1: keyword one 2: keyword two 3: keyword three"`` (possibly
    containing newlines).  Returns a flat list of keyword strings with
    the ``N:`` prefixes stripped.

    Parameters
    ----------
    dirty_list : iterable of str
        Raw keyword strings, e.g. as stored in the database.

    Returns
    -------
    list of str
        All extracted, non-empty keywords, in input order.
    """
    clean_list = []

    for item in dirty_list:
        # Primary strategy: regex-capture the text between "N:" markers.
        keywords = re.findall(r'\d+:\s*([^0-9]+?)(?=\s*\d+:|$)', item)

        # Fallback: if the regex found suspiciously few matches, split on
        # the "N:" markers instead (more tolerant of odd whitespace).
        if not keywords or len(keywords) < 3:
            # Collapse newlines and runs of whitespace so the split
            # pattern behaves uniformly.
            normalized = re.sub(r'\s+', ' ', item.replace('\n', ' '))
            parts = re.split(r'\s*\d+:\s*', normalized)
            # Strip once per part; drop empties and pure-digit leftovers.
            # (The original stripped repeatedly and carried a redundant
            # duplicate `if part.strip()` condition.)
            keywords = [
                stripped
                for stripped in (part.strip() for part in parts)
                if stripped and not stripped.isdigit()
            ]

        # Keep only non-empty keywords.
        clean_list.extend(kw for kw in keywords if kw)

    return clean_list


def create_co_occurrence_matrix(documents):
    """
    Build a symmetric keyword co-occurrence matrix.

    Parameters
    ----------
    documents : iterable of iterable of str
        Each inner iterable is one document's keyword list.

    Returns
    -------
    pandas.DataFrame
        Square integer DataFrame indexed/columned by the sorted set of
        all keywords.  Cell (a, b) counts how many times the pair
        (a, b) co-occurs across documents; every pair is mirrored so
        the matrix stays symmetric.
    """
    # Sorted, de-duplicated vocabulary over all documents.
    all_keywords = sorted({keyword for doc in documents for keyword in doc})

    # Map each keyword to its row/column position for O(1) lookups
    # (the original did O(n) list-membership tests per pair and slow
    # per-cell DataFrame .loc updates).
    index_of = {kw: i for i, kw in enumerate(all_keywords)}

    matrix_data = np.zeros((len(all_keywords), len(all_keywords)), dtype=int)

    # Count co-occurrences: one increment per unordered pair per document,
    # mirrored into both triangles.
    for doc in documents:
        for a, b in combinations(doc, 2):
            ia = index_of.get(a)
            ib = index_of.get(b)
            if ia is not None and ib is not None:
                matrix_data[ia, ib] += 1
                matrix_data[ib, ia] += 1

    return pd.DataFrame(matrix_data, index=all_keywords, columns=all_keywords)


def plot_heatmap(co_occurrence, title="Keyword Co-occurrence Heatmap", figsize=(12, 10)):
    """
    Render a co-occurrence DataFrame as an annotated seaborn heatmap.

    Saves the figure to ``keyword_heatmap.png`` (300 dpi) and then
    displays it with ``plt.show()``.
    """
    plt.figure(figsize=figsize)

    # Draw the heatmap; counts are integers, so annotate with 'd' format.
    heatmap_options = {
        "annot": True,              # write the count inside each cell
        "cmap": "YlGnBu",
        "linewidths": .5,
        "fmt": "d",
        "annot_kws": {"size": 8},   # annotation text size
    }
    sns.heatmap(co_occurrence, **heatmap_options)

    # Title and axis labels.
    plt.title(title, fontsize=14)
    plt.xlabel("Keywords", fontsize=12)
    plt.ylabel("Keywords", fontsize=12)

    # Slant the x labels so long keywords stay readable.
    plt.xticks(rotation=45, ha='right', fontsize=10)
    plt.yticks(rotation=0, fontsize=10)

    plt.tight_layout()
    plt.savefig("keyword_heatmap.png", dpi=300, bbox_inches='tight')
    plt.show()


def plt_wordcloud(clean_keywords, save_file='fig', save_path=None):
    """
    Render a word cloud from a list of keywords and save it as a PNG.

    Parameters
    ----------
    clean_keywords : list of str
        Keywords to visualize; joined with spaces for WordCloud.
    save_file : str, optional
        Directory to save into (created if missing).  If falsy, the
        figure is not saved.
    save_path : str, optional
        File name for the image.  Defaults to a timestamped name
        generated *at call time*.  (The original computed the timestamp
        once in the default argument at import time, so every call in a
        long-running process reused the same file name.)

    Returns
    -------
    str
        The full path the image was written to (``save_file`` joined
        with the file name), or the bare file name if saving skipped.
    """
    if save_path is None:
        # Generate the timestamp per call, not once at module import.
        save_path = f"wordcloud{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.png"

    text = " ".join(clean_keywords)
    wordcloud = WordCloud(width=800, height=400, background_color='white').generate(text)
    plt.figure(figsize=(10, 5))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis('off')
    if save_file:
        # exist_ok=True already tolerates a pre-existing directory; the
        # original's os.path.exists() pre-check was redundant.
        os.makedirs(save_file, exist_ok=True)
        save_path = os.path.join(save_file, save_path)
        plt.savefig(save_path, dpi=300)
    # plt.show()
    return save_path


if __name__ == '__main__':
    # Pipeline: fetch keyword blobs from MySQL -> clean them -> render a
    # word cloud -> upload the image to OSS -> email an HTML report.
    #
    # SECURITY NOTE(review): database credentials, OSS access keys and
    # the SMTP password below are hard-coded in source; move them to
    # environment variables or a secrets store.
    conn = pymysql.connect(
        host= "lib.gorio.top",
        user= "root",
        password= "t4hf647e",
        database= "rss",
        port= 13306
    )
    # Pull up to 500 recent keyword blobs from the papers table.
    sql = "SELECT keywords FROM rss.papers WHERE keywords IS NOT NULL and ID > 1500 limit 500;"
    cursor = conn.cursor()
    cursor.execute(sql)
    results = cursor.fetchall()
    cursor.close()
    # print(results)
    dirty_keywords = []
    for keywords in results:
        # Each row should be a 1-tuple (one selected column); report and
        # skip anything unexpected.
        if len(keywords) > 1:
            print(len(keywords))
            continue
        keywords = keywords[0]
        # Only keep blobs that start with the "1" of a "1: ..." numbered
        # list; any other format is ignored.
        if not keywords.startswith("1"):
            # print(keywords)
            continue
        dirty_keywords.append(keywords)
        # print(keywords)
    clean_keywords  = clean_keyword_list(dirty_keywords)
    # Print results (debug)
    # print("提取后的纯净关键词:")
    # for keyword in clean_keywords:
    #     print(keyword)
    # for i, keyword in enumerate(clean_keywords, 1):
    #     print(f"{i}: {keyword}")
    # Render the word cloud locally, then upload it to OSS to obtain a
    # publicly reachable URL for the email body.
    save_path = plt_wordcloud(clean_keywords)
    print(save_path)
    appkeyid = "LTAI5tSoNNGmbHQ3UYGQdZkU"
    appkeysecret = "6CwBBw1EqVjB6towwMcoB33k5JKrJv"
    bucket_name = "gorio"
    # region = "oss-cn-beijing"
    region = "cn-beijing"

    res = upload_file_and_get_url(save_path, appkeyid, appkeysecret,
                                  bucket_name, region)
    print(res)
    # Jinja2 HTML template: greeting plus the word-cloud image.  The
    # commented-out continuation below is an extended report template
    # (paper updates + conference countdowns) kept for reference.
    template = ("你好，{{truename}}!<br><br> <div style='text-align: center; margin-bottom: 30px;'> <img src='{{wordcloud_url}}' alt='本期合集词云' style='max-width: 100%;'>  </div> ")
                # "以下是论文更新：<br> {% for site, items in feeds.items() %}<b>{{ site }}</b><br><ul>{% for item in items %}<br><br><li><a>论文名称：{{ item.title }}</a></li><li><a>论文作者：{{ item.authors }}</a></li><li><a>论文英文摘要：{{ item.summary }}</a></li><li><a>论文中文摘要：{{ item.paper_summary_chinese }}</a></li><li><a>论文关键词：{{ item.keywords }}</a></li><li>论文链接：<a href=\"{{ item.link }}\" target=\"_blank\">点击查看</a></li><br><br>{% endfor %}</ul>{% endfor %} <br><br> 会议结束倒计时：<br> {% for cc in confs %} <li><a>会议名称：{{ cc['Conference'] }}</a> <br> </li> 截止时间：{{ cc['Deadline'] }}</a></li><br> </li> 剩余时间：{{ cc['Days Left'] }} 天</a></li> <br> <li>会议链接：<a href=\"{{ cc['URL']}}\" target=\"_blank\">点击查看</a></li> <br>{% endfor %} <br><br> Powered by 郭振洋，Contact me：zyguo2020@163.com")

    tpl = Template(template)
    # NOTE(review): truename receives the literal string "user['truename']"
    # (quotes included) — looks like a leftover from a lookup expression;
    # confirm whether a real user name was intended here.
    html = tpl.render(truename="user['truename']", wordcloud_url=res)
    smtp_conf ={"server": "smtp.163.com",
       "port": 25,
       "user_name": "lib_gorio@163.com",
       "password": "NLXIBGYTODCUYBGW",
       "domain": "163.com",
       "smtp_authentication": "login",
       "smtp_enable_starttls_auto": True,
       "smtp_tls": False,
       "email_from": "lib_gorio@163.com", }
    send_email(smtp_conf, 'zyguo2020@163.com', f"测试", html)
    print(html)


    # Commented-out: co-occurrence heatmap pipeline (kept for reference).
    # co_occurrence_matrix = create_co_occurrence_matrix([clean_keywords])
    #
    # # 打印矩阵预览
    # # print("\n关键词共现矩阵预览:")
    # # print(co_occurrence_matrix.head())
    #
    # # 3. 绘制热力图
    # plot_heatmap(co_occurrence_matrix, title="学术关键词共现热力图")
    # # 可选：过滤低频关键词（可选步骤）
    # min_occurrences = 2
    # keyword_counts = co_occurrence_matrix.sum(axis=1)  # 每行求和得到每个关键词的总出现次数
    # frequent_keywords = keyword_counts[keyword_counts >= min_occurrences].index
    #
    # # 创建过滤后的共现矩阵
    # filtered_matrix = co_occurrence_matrix.loc[frequent_keywords, frequent_keywords]
    #
    # # 绘制过滤后的热力图
    # if not filtered_matrix.empty:
    #     plot_heatmap(
    #         filtered_matrix,
    #         title=f"高频关键词共现热力图 (出现次数≥{min_occurrences})",
    #         figsize=(10, 8)
    #     )