# -*- coding: utf-8 -*-
import pytz
import logging
import os
import time
from datetime import datetime, time as dtime
import pymysql
import schedule
from jinja2 import Template
import sys
from pymysql.cursors import DictCursor

from config import load_config, setup_schedule
from fetch_top_4_conf_countdown import fetch_top4_conf_countdown, update_ical_url
from mail import send_email
from my_rss import fetch_rss_content, deal_with_dblp
from parse_args import parse_args
from register_self import register_self
from report_status import report_status
from scholar import fetch_paper_keywords
from translator import fetch_translator_result
from logging.handlers import TimedRotatingFileHandler, RotatingFileHandler

# Fallback logging format / date format / level used when the config file
# provides no `log` section (see logger_conf()).
_default_logger_format = '%(asctime)s %(levelname)s %(message)s'
_default_logger_datefmt = '%Y-%m-%d %H:%M:%S'
_default_logger_level = 'info'
# Log color configuration (only used by the commented-out colorlog setup).
_log_colors = {
    'DEBUG': 'white',
    'INFO': 'green',
    'WARNING': 'yellow',
    'ERROR': 'red',
    'CRITICAL': 'bold_red',
}
# Timestamp (Beijing time string) of the most recent worker() run; set in worker().
start_time = None
# Human-readable reason the current run was started; set in main()/__main__.
begin_reason = None
# Timestamp when the process itself started; set in the __main__ guard.
program_start_time = None
# Decide whether a given time falls inside the configured quiet window.
def is_in_sleep_period(current_time, start_time, end_time):
    """Return True when current_time lies in [start_time, end_time).

    Windows that wrap past midnight (e.g. 23:00 -> 08:00) are supported.
    """
    wraps_midnight = start_time >= end_time
    if wraps_midnight:
        # Inside unless strictly between the window's end and its start.
        return not (end_time <= current_time < start_time)
    return start_time <= current_time < end_time


# =========================
# 2. Configuration loading
# =========================
def create_rotating_handler(filepath, level, fmt, datefmt, rotation_conf):
    """Build a file log handler according to `rotation_conf`.

    rotation_conf['type'] selects the rotation strategy:
      * 'time'      -> TimedRotatingFileHandler (when/interval/backup_count)
      * 'size'      -> RotatingFileHandler (max_bytes/backup_count)
      * other/none  -> plain FileHandler (no rotation)
    The parent directory of `filepath` is created if it does not exist.
    """
    # Only create the directory when the path actually has one: a bare
    # filename yields dirname == '' and os.makedirs('') raises.
    parent_dir = os.path.dirname(filepath)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)

    rotation_type = rotation_conf.get('type')
    if rotation_type == 'time':
        handler = TimedRotatingFileHandler(
            filename=filepath,
            when=rotation_conf.get('when', 'midnight'),
            interval=rotation_conf.get('interval', 1),
            backupCount=rotation_conf.get('backup_count', 7),
            encoding='utf-8'
        )
    elif rotation_type == 'size':
        handler = RotatingFileHandler(
            filename=filepath,
            maxBytes=rotation_conf.get('max_bytes', 10 * 1024 * 1024),
            backupCount=rotation_conf.get('backup_count', 5),
            encoding='utf-8'
        )
    else:
        handler = logging.FileHandler(filepath, encoding='utf-8')

    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(fmt, datefmt))
    return handler


def switch_log_level(_log_level):
    """Map a config-file level string to a `logging` level constant.

    Accepts 'info', 'warn' / 'warning' and 'error' (case-insensitive).
    Any unrecognized value falls back to logging.INFO.
    """
    level_map = {
        'info': logging.INFO,
        'warn': logging.WARNING,
        'warning': logging.WARNING,  # accept logging's own spelling too
        'error': logging.ERROR,
    }
    return level_map.get(_log_level.lower(), logging.INFO)


# Custom level-cap filter: passes only records at or below `level`.
# Used to keep WARNING+ out of the stdout / normal-log handlers.
class MaxLevelFilter(logging.Filter):
    def __init__(self, level):
        # Initialize the base Filter so its `name`/`nlen` attributes exist
        # (the original skipped this, leaving the instance half-constructed).
        super().__init__()
        self.level = level

    def filter(self, record):
        # Keep the record only when its level does not exceed the cap.
        return record.levelno <= self.level


def logger_conf(_log_conf):
    """Configure the root logger from the `log` section of the config.

    Installs:
      * a stdout handler for DEBUG..INFO and a stderr handler for WARNING+,
      * optional rotating file handlers when _log_conf['file'] is present
        (separate normal/error logs, or a single combined log).
    Falls back to the module-level defaults when _log_conf is None.

    The root logger itself stays at DEBUG; filtering is done per handler
    via levels and MaxLevelFilter.

    NOTE(review): the config's 'level' value is currently not applied
    anywhere — the original trailing logging.basicConfig(...) call was a
    guaranteed no-op (basicConfig does nothing once the root logger has
    handlers), so removing it preserves behavior. Confirm whether 'level'
    should instead drive the handler levels.
    """
    if _log_conf is None:
        _log_format = _default_logger_format
        _log_datefmt = _default_logger_datefmt
    else:
        _log_format = _log_conf.get('format', _default_logger_format)
        _log_datefmt = _log_conf.get('datefmt', _default_logger_datefmt)

    logger = logging.getLogger()
    # Drop any previously installed handlers to avoid duplicate output.
    logger.handlers.clear()
    logger.setLevel(logging.DEBUG)

    # Console output: DEBUG..INFO -> stdout, WARNING+ -> stderr.
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.DEBUG)
    stdout_handler.addFilter(MaxLevelFilter(logging.INFO))
    stdout_handler.setFormatter(logging.Formatter(_log_format, _log_datefmt))

    stderr_handler = logging.StreamHandler(sys.stderr)
    stderr_handler.setLevel(logging.WARNING)
    stderr_handler.setFormatter(logging.Formatter(_log_format, _log_datefmt))

    logger.addHandler(stdout_handler)
    logger.addHandler(stderr_handler)

    # Optional file output configuration.
    file_conf = _log_conf.get('file') if _log_conf else None
    rotation_conf = _log_conf.get('rotation', {}) if _log_conf else {}

    if file_conf:
        normal_log = file_conf.get('normal')
        error_log = file_conf.get('error')

        if normal_log and error_log:
            # DEBUG..INFO -> normal log
            normal_handler = create_rotating_handler(normal_log, logging.DEBUG, _log_format, _log_datefmt,
                                                     rotation_conf)
            normal_handler.addFilter(MaxLevelFilter(logging.INFO))
            logger.addHandler(normal_handler)

            # WARNING and above -> error log
            error_handler = create_rotating_handler(error_log, logging.WARNING, _log_format, _log_datefmt,
                                                    rotation_conf)
            logger.addHandler(error_handler)

        elif normal_log:
            # Everything goes into the single normal log.
            all_handler = create_rotating_handler(normal_log, logging.DEBUG, _log_format, _log_datefmt, rotation_conf)
            logger.addHandler(all_handler)


def fetch_rss(db_conf):
    """Load all (site_name, rss_url) subscription rows from the `rss` table."""
    connection = pymysql.connect(
        host=db_conf['ip'],
        user=db_conf['user'],
        password=db_conf['password'],
        database=db_conf['schema'],
        port=db_conf['port']
    )
    try:
        with connection.cursor(DictCursor) as cursor:
            cursor.execute("SELECT site_name, rss_url FROM rss")
            rows = cursor.fetchall()
    finally:
        connection.close()
    return rows


def fetch_users(db_conf):
    """Fetch every subscriber (username, email, truename) from the `user` table."""
    connection = pymysql.connect(
        host=db_conf['ip'],
        user=db_conf['user'],
        password=db_conf['password'],
        database=db_conf['schema'],
        port=db_conf['port']
    )
    try:
        with connection.cursor(DictCursor) as cursor:
            cursor.execute("SELECT username, email, truename FROM user")
            rows = cursor.fetchall()
    finally:
        connection.close()
    return rows


# def fetch_rss_content(sites):
#     results = {}
#     _rss_list = []
#     for site in sites:
#         if site['site_name'] in results:
#             _rss_list = results[site['site_name']]
#         else:
#             _rss_list = []
#             results[site['site_name']] = _rss_list
#         feed = feedparser.parse(site['rss_url'])
#
#         for entry in feed.entries:
#             if entry.summary is None or entry.summary == 'null':
#                 continue
#             _rss_list.append({
#                 'title': entry.title,  # 论文名
#                 'link': entry.link,  # 论文链接
#                 'summary': entry.summary,  # 论文摘要
#                 'published': entry.published,  # 发表时间
#                 'authors': entry.authors  # 作者
#             })
#         results[site['site_name']] = _rss_list
#     return results


def find_new_paper(rss_results, db_conf):
    """Determine which papers still need to be emailed.

    Merges papers stored in the DB with issubmit = 0 (fetched earlier but
    never sent) into the freshly fetched `rss_results`, inserts never-seen
    papers into the `papers` table, and returns {site_name: [paper, ...]}
    containing only unsent papers, de-duplicated by title per site.

    Side effects: INSERTs new rows into `papers` (committed immediately).
    """
    conn = pymysql.connect(
        host=db_conf['ip'],
        user=db_conf['user'],
        password=db_conf['password'],
        database=db_conf['schema'],
        port=db_conf['port']  # port parameter
    )
    cursor = conn.cursor(pymysql.cursors.DictCursor)
    # First pick up papers already in the DB that should have been sent but
    # were not (e.g. a previous run failed before sending).
    sql = "select site_name, paper_name as title, paper_links as link, paper_summary as summary, paper_authors as authors from papers where issubmit = 0"
    cursor.execute(sql)
    res = cursor.fetchall()
    result_set = {}
    if res:
        # Group the unsent DB rows by site.
        for row in res:
            site_name = row['site_name']
            if site_name not in result_set:
                result_set[site_name] = []
            result_set[site_name].append(row)
    # Merge the unsent DB papers into the freshly fetched RSS results.
    for site in result_set.keys():
        if site in rss_results:
            papers = rss_results[site]
            papers.extend(result_set[site])
            rss_results[site] = papers
        else:
            rss_results[site] = result_set[site]
    new_papers = {}
    for sites in rss_results.keys():
        if sites in new_papers:
            # NOTE(review): dict keys are unique, so this branch looks
            # unreachable; kept as-is.
            continue
        else:
            new_papers[sites] = []
            new_papers_list = []
            new_papers_sets = set()  # titles seen so far, for de-duplication
        for papers in rss_results[sites]:
            # print(f"{sites}-->{papers['title']}")
            # print(f"select count(*) as co, issubmit from papers where paper_links = '{papers['link']}'")
            sql = "select count(*) as co, issubmit from papers where paper_links = %s"
            # NOTE(review): (papers['link']) is a bare string, not a 1-tuple;
            # pymysql accepts this form, but (papers['link'],) would be clearer.
            cursor.execute(sql, (papers['link']))
            res = cursor.fetchall()
            if res[0]['co'] == 0:
                sql = ("insert into papers (site_name, paper_name, paper_links, paper_summary, paper_authors) values ("
                       "%s, %s, %s, %s, %s)")
                # The paper is not in the DB yet: insert it...
                cursor.execute(sql, (
                    sites, papers['title'], papers['link'], papers['summary'] if papers['summary'] else 'not found',
                    papers['authors']))
                conn.commit()
                if papers['title'] not in new_papers_sets:
                    new_papers_sets.add(papers['title'])
                    new_papers_list.append(papers)
                else:
                    logging.warning(f"found duplicate paper in new_papers_list, skip it, {papers['title']} is not fetched or not sent")
                # ...and treat it as new, i.e. to be included in the email.
            elif res[0]['issubmit'] is not None and res[0]['issubmit'] == 0:
                # Known paper that was never sent: reuse its stored Chinese
                # summary (if any) and queue it for sending.
                sql = "select paper_summary_chinese from papers where paper_links = %s"
                cursor.execute(sql, (papers['link']))
                res = cursor.fetchall()
                papers['paper_summary_chinese'] = res[0]['paper_summary_chinese'] if res[0][
                                                                                         'paper_summary_chinese'] is not None else ''
                # new_papers_list.append(papers)
                if papers['title'] not in new_papers_sets:
                    new_papers_sets.add(papers['title'])
                    new_papers_list.append(papers)
                else:
                    logging.warning(f"found duplicate paper in new_papers_list, skip it, {papers['title']} is fetched but not sent")
            # cursor.execute(f"insert into papers (site_name, paper_name, paper_links, paper_summary, paper_authors)
            # values ({sites, papers['title'], papers['link'], papers['summary'], papers['authors']})")
        new_papers[sites] = new_papers_list
    cursor.close()
    conn.close()

    return new_papers


def fetch_new_paper_chinese_abstract(new_papers, db_conf, translator_conf):
    """Attach a Chinese abstract to every pending paper.

    Papers that already carry a non-empty 'paper_summary_chinese' are
    skipped; for the rest the translator service is called, the result is
    persisted to the `papers` table and placed on the in-memory paper dict.
    Returns the same `new_papers` mapping, mutated in place.
    """
    # 2025-04-02: the OpenAI-based translation below was too expensive to keep.
    # _api_key = translator_conf['api-keys']
    # _model = translator_conf['model']
    # prompt = translator_conf['prompt']
    # base_url = translator_conf['base_url']
    # client = OpenAI(
    #     api_key=_api_key,
    #     base_url=base_url
    # )
    # 2025-04-02: switched to Baidu Translate advanced tier
    # (1,000,000 characters per month).

    conn = pymysql.connect(
        host=db_conf['ip'],
        user=db_conf['user'],
        password=db_conf['password'],
        database=db_conf['schema'],
        port=db_conf['port']  # port parameter
    )
    cursor = conn.cursor(pymysql.cursors.DictCursor)
    for sites in new_papers.keys():
        for paper in new_papers[sites]:
            # Skip papers whose Chinese summary is already filled in.
            if 'paper_summary_chinese' in paper.keys():
                if paper['paper_summary_chinese'] is not None and len(paper['paper_summary_chinese']) != 0:
                    continue
            logging.info(f"fetching paper {paper['title']} chinese abstract")
            # preference = translator_conf['preference']
            translator_result = fetch_translator_result(paper['summary'], translator_conf)
            # response = client.chat.completions.create(
            #     model="gemini-2.0-flash",
            #     n=1,
            #     messages=[
            #         {"role": "user", "content": f"{prompt}:{paper['summary']}"},
            #     ]
            # )
            # print(response.choices[0].message.content)
            # Persist the translation so reruns don't translate it again.
            sql = "update papers set paper_summary_chinese = %s where paper_links = %s"
            cursor.execute(sql, (translator_result, paper['link']))
            conn.commit()
            time.sleep(2)  # presumably throttling for the translator API — confirm
            paper['paper_summary_chinese'] = translator_result
            logging.info(f"fetching paper {paper['title']} chinese abstract done!")
    cursor.close()
    conn.close()
    return new_papers

# Number of worker() executions since the process started (reported in status mails).
count = 0

# Parsed CLI arguments; populated in main() and read by several worker functions.
args = None


def update_paper_status(new_papers, db_conf):
    """Mark every paper in `new_papers` as sent (issubmit = 1) in the DB.

    Skipped entirely in debug mode so test runs never mutate the table.
    """
    if args.mode == 'debug':
        return
    connection = pymysql.connect(
        host=db_conf['ip'],
        user=db_conf['user'],
        password=db_conf['password'],
        database=db_conf['schema'],
        port=db_conf['port']
    )

    cursor = connection.cursor(pymysql.cursors.DictCursor)
    sql = "update papers set issubmit = %s where paper_links = %s"
    for site_papers in new_papers.values():
        for paper in site_papers:
            cursor.execute(sql, (True, paper['link']))
            connection.commit()
    cursor.close()
    connection.close()




def requirements_install(requirements_file_path):
    """Install Python dependencies from `requirements_file_path` via pip.

    Runs pip through the current interpreter using an argument list (no
    shell), so paths containing spaces are safe and no shell injection is
    possible — the previous os.system(f"...") calls had both problems,
    and the Windows/Linux branches were otherwise equivalent.
    Logs a warning and does nothing when the file is missing.
    """
    import subprocess

    if os.path.exists(requirements_file_path):
        logging.info(f"发现 {requirements_file_path} 文件，开始安装依赖...")

        print("正在安装依赖...")
        cmd = [sys.executable, "-m", "pip", "install", "-r", requirements_file_path]
        result = subprocess.run(cmd)
        # os.system ignored the exit status; surface failures explicitly.
        if result.returncode != 0:
            logging.error(f"pip install 失败，退出码: {result.returncode}")

        logging.info("依赖安装完成")
    else:
        logging.warning("未找到 requirements.txt 文件，跳过依赖安装")


def update_project():
    """Pull the latest project code via `git pull`, logging the outcome."""
    import subprocess
    try:
        completed = subprocess.run(
            ["git", "pull"],
            check=True,
            text=True,
            capture_output=True,
        )
    except subprocess.CalledProcessError as err:
        logging.error(f"项目更新失败: {err.stderr}")
    else:
        logging.info(f"项目更新成功: {completed.stdout}")


def main():
    """Entry point: dispatch on --mode after loading config and logging.

    Modes: 'production' (run once, then scheduler loop), 'register',
    'daemon' (same loop but detached), 'stop', 'kill', 'install',
    'update', 'report', and anything else = debug (run worker once).
    """
    # All scheduling decisions use Beijing time.
    beijing_tz = pytz.timezone('Asia/Shanghai')
    # Quiet window during which the scheduler must not run:
    # 23:00 until 08:00 the next morning.
    start = dtime(23, 0, 0)  # 23:00
    end = dtime(8, 0, 0)  # 08:00

    global args
    global begin_reason
    args = parse_args()

    run_mode = args.mode



    # Load the configuration file and configure logging from it.
    config_path = args.config
    config = load_config(config_path)
    log_conf = config['log']
    logger_conf(log_conf)
    pid_file_path = config['server']['pid_file_path']

    print('working in mode:', run_mode)
    # Mode dispatch. 'production' and 'daemon' run the scheduler loop.
    if run_mode == 'production':
        # config['log']['level'] = 'debug'
        # Regardless of mode, run the worker once immediately.
        schedule_str = setup_schedule(config, worker)
        logging.info(f"worker is scheduled every {schedule_str}")
        worker(config)
        begin_reason = "定时任务启动"
        while True:
            now = datetime.now(beijing_tz).time()
            if is_in_sleep_period(now, start, end):
                logging.info("当前时间为休眠时间段（23:00 - 08:00），程序不执行。")
                time.sleep(60)  # sleep for one minute
            else:
                schedule.run_pending()
            time.sleep(1)
    elif run_mode == 'register':
        # Register this service with the operating system.
        register_self()
    elif run_mode == 'daemon':
        import daemon
        from daemon import DaemonContext
        from daemon.pidfile import PIDLockFile
        try:
            if os.path.exists(pid_file_path):
                with open(pid_file_path, 'r') as f:
                    try:
                        pid = int(f.read().strip())
                        # Signal 0 only checks whether the process exists.
                        os.kill(pid, 0)
                        print(f"守护进程已在运行，PID: {pid}")
                        sys.exit(1)
                    except (ValueError, ProcessLookupError):
                        # Stale or invalid PID file: remove it and continue.
                        os.remove(pid_file_path)
                        print(f"检测到无效的 PID 文件，已删除: {pid_file_path}")

            with daemon.DaemonContext(
                    pidfile=PIDLockFile(pid_file_path),
                    stdout=sys.stdout,
                    stderr=sys.stderr,
                    files_preserve=[handler.stream.fileno() for handler in logging.getLogger().handlers if
                                    hasattr(handler, 'stream')]
            ):

                print("successfully start daemon with pid:", os.getpid())
                worker(config)
                schedule_str = setup_schedule(config, worker)
                logging.info(f"worker is scheduled every {schedule_str}")
                begin_reason = "定时任务启动"
                while True:
                    now = datetime.now(beijing_tz).time()
                    if is_in_sleep_period(now, start, end):
                        logging.info("当前时间为休眠时间段（23:00 - 08:00），程序不执行。")
                        time.sleep(60)  # sleep for one minute
                    else:
                        schedule.run_pending()
                    time.sleep(1)
        except Exception as e:
            print(f"Failed to start daemon: {e}")
    elif run_mode == 'stop':
        # Stop the daemon recorded in the PID file.
        if os.path.exists(pid_file_path):
            with open(pid_file_path, 'r') as f:
                pid = int(f.read().strip())
            try:
                os.kill(pid, 9)  # send SIGKILL
                print(f"成功停止守护进程，PID: {pid}")
            except OSError as e:
                print(f"停止守护进程失败: {e}")
        else:
            print("没有找到守护进程的 PID 文件。")
    elif run_mode == 'kill':
        _args_pid = args.pid
        if _args_pid:
            # Only allow killing this program itself or the child recorded
            # in the PID file.
            current_pid = os.getpid()
            if _args_pid != current_pid:
                try:
                    with open(pid_file_path, 'r') as f:
                        child_pid = int(f.read().strip())
                        if _args_pid != child_pid:
                            raise ValueError(f"指定的 PID {_args_pid} 不是当前程序或其子进程的 PID")
                        else:
                            os.kill(_args_pid, 9)  # kill the child process
                            print(f"成功杀死子进程，PID: {_args_pid}")
                            sys.exit(0)
                except (FileNotFoundError, ValueError):
                    raise ValueError(f"指定的 PID {_args_pid} 无效或无法验证")
            else:
                os.kill(_args_pid, 9)  # kill the current process
                print(f"成功杀死当前进程，PID: {_args_pid}")
                sys.exit(0)
        _args_pid_file = args.pidfile
        if _args_pid_file:
            # The PID file argument must match the configured PID file.
            if _args_pid_file != pid_file_path:
                raise ValueError(f"指定的 PID 文件 {_args_pid_file} 不是当前程序或其子进程的 PID 文件")
            else:
                try:
                    with open(_args_pid_file, 'r') as f:
                        pid = int(f.read().strip())
                        os.kill(pid, 9)  # kill the recorded process
                        print(f"成功杀死当前进程，PID: {pid}")
                        sys.exit(0)
                except (FileNotFoundError, ValueError):
                    raise ValueError(f"指定的 PID 文件 {_args_pid_file} 无效或无法验证")
    elif run_mode == 'install':
        requirements_file_path = args.requirements_file
        requirements_install(requirements_file_path)
        update_project()
        ensure_table_exists(config['db'])
        # requirements_path = os.path.join(os.getcwd(), 'requirements.txt')
        # if os.path.exists(requirements_path):
        #     logging.info("发现 requirements.txt 文件，开始安装依赖...")
        #
        #     os.system('')
        #     os.system(f"python3 -m pip install -r {requirements_path}")
        #     logging.info("依赖安装完成")
        # else:
        #     logging.warning("未找到 requirements.txt 文件，跳过依赖安装")
    elif run_mode == 'update':
        update_project()
    elif run_mode == 'report':
        auto_conform = args.auto_confirm if hasattr(args, 'auto_confirm') else False
        report_status(pid_file_path, auto_conform, log_conf)
    else:
        # Debug mode: run the pipeline once and exit.
        begin_reason = "调试任务启动"
        # print([handler.stream.fileno() for handler in logging.getLogger().handlers if
        #        hasattr(handler, 'stream')])
        worker(config)


def send_status_worker(config, smtp_conf, serverchan_sdk=None):
    """Send a run-status report after each worker() execution.

    Dispatches on config['report']['preference']:
      * 'smtp'         -> render the configured Jinja2 template and email it
      * 'server_jiang' -> push via the ServerChan SDK
    An empty or unsupported preference is skipped with a warning.
    """
    global count
    global start_time
    global begin_reason
    global args
    preference = config['report']['preference']
    if preference is None or len(preference) == 0:
        logging.warning("没有配置报告的偏好设置，跳过报告发送")
        return
    if preference == 'smtp':
        report_smtp_config = config['report']['smtp']
        run_mode = args.mode
        # The HTML body template comes from the config (Jinja2 syntax); it can
        # reference count, run_mode, finish_time, begin_reason, start_time and
        # program_start_time.
        template = report_smtp_config['template']
        # title = 'rss_client_status report'
        title =  report_smtp_config['title']
        to_addr = report_smtp_config['to_addr']
        tpl = Template(template)
        now = datetime.now(pytz.timezone("Asia/Shanghai")).strftime('%Y-%m-%d %H:%M:%S')
        html = tpl.render(count=count, run_mode=run_mode, finish_time= now, begin_reason=begin_reason, start_time=start_time, program_start_time = program_start_time)
        send_email(smtp_conf, to_addr, f"{title}-{now}-{count}次", html, email_from_name= "状态上报")
    elif preference == 'server_jiang':
        send_key = config['report']['server_jiang']['send_key']
        title = config['report']['server_jiang']['title']
        desp = config['report']['server_jiang']['desp']
        from serverchan_sdk import sc_send
        response = sc_send(send_key, title, desp, {"count": count, "run_mode": args.mode, "finish_time": datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
        logging.info(response)
    else:
        logging.warning(f"不支持的报告偏好设置: {preference}，跳过报告发送")
        return



def worker(config):
    """Run one full pipeline pass.

    Steps: ensure DB schema, fetch RSS feeds, resolve dblp entries, find
    papers not yet sent, translate abstracts, extract keywords, email the
    digest to subscribers and send a status report. Updates the global
    `start_time` and `count` counters as side effects.
    """
    global start_time
    start_time = datetime.now(pytz.timezone("Asia/Shanghai")).strftime('%Y-%m-%d %H:%M:%S')
    global count
    logging.info(f"任务第 {count} 次执行 at {time.strftime('%Y-%m-%d %H:%M:%S')}")
    count += 1
    db_conf = config['db']
    smtp_conf = config['smtp']

    serpapi_conf = config['scholar']['serpapi']
    translator_conf = config['translator']

    # 2025-05-21: the SerpAPI key is no longer passed via an environment
    # variable, because list-valued keys cannot be transported that way.
    # os.environ["serpapi_api_key"] = serpapi_conf['api-key']
    # logging.info(f"config = {config}")
    # logging.warning(f"config = {config}")
    # logging.error(f"config = {config}")
    # sys.exit(0)
    ensure_table_exists(db_conf)
    sites = fetch_rss(db_conf)
    rss_results = fetch_rss_content(sites)
    rss_results = deal_with_dblp(db_conf, rss_results, serpapi_conf)
    logging.info(f"rss_results = {rss_results}")
    # Example of the rss_results structure (kept for reference):
    # rss_results = {
    #                         'NDSS 2025': [{
    #                             'title': 'On Borrowed Time - Preventing Static Side-Channel Analysis.',
    #                             'link': 'https://www.ndss-symposium.org/ndss-paper/on-borrowed-time-preventing-static-side-channel-analysis/',
    #                             'summary': '—In recent years a new class of side-channel attacks has emerged. Instead of targeting device emissions during dynamic computation, adversaries now frequently exploit the leakage or response behaviour of integrated circuits in a static state. Members of this class include Static Power Side-Channel Analysis (SCA), Laser Logic State Imaging (LLSI) and Impedance Analysis (IA). Despite relying on different physical phenomena, they all enable the extraction of sensitive information from circuits in a static state with high accuracy and low noise – a trait that poses a significant threat to many established side-channel countermeasures. In this work, we point out the shortcomings of existing solutions and derive a simple yet effective countermeasure. We observe that in order to realise their full potential, static side-channel attacks require the targeted data to remain unchanged for a certain amount of time. For some cryptographic secrets this happens naturally, for others it requires stopping the target circuit’s clock. Our proposal, called Borrowed Time, hinders an attacker’s ability to leverage such idle conditions, even if full control over the global clock signal is obtained. For that, by design, key-dependent data may only be present in unprotected temporary storage (e.g. flip-flops) when strictly needed. Borrowed Time then continuously monitors the target circuit and upon detecting an idle state, securely wipes sensitive contents. We demonstrate the need for our countermeasure and its effectiveness by mounting practical static power SCA attacks against cryptographic systems on FPGAs, with and without Borrowed Time. In one case we attack a masked implementation and show that it is only protected with our countermeasure in place. Furthermore we demonstrate that secure on-demand wiping of sensitive data works as intended, affirming',
    #                             'published': 'MON, 24 FEB 2025 09:17:26 -0400',
    #                             'authors': 'Robert Dumitru 0002;Thorben Moos;Andrew Wabnitz;Yuval Yarom;'
    #                         }]
    #                         }
    new_papers = find_new_paper(rss_results, db_conf)
    logging.info(f"new_papers = {new_papers}")
    # For new (or previously unsent) papers, fetch Chinese abstracts first.
    new_papers = fetch_new_paper_chinese_abstract(new_papers, db_conf, translator_conf)
    new_papers = fetch_paper_keywords(new_papers, config)
    # Drop sites that ended up with no papers to send.
    final_new_papers = {}
    for site in new_papers.keys():
        if new_papers[site] is not None and len(new_papers[site]) > 0:
            final_new_papers[site] = new_papers[site]
    new_papers = final_new_papers


    # print(new_papers)
    # print(rss_results)
    send_email_worker(config, db_conf, new_papers, smtp_conf)
    send_status_worker(config, smtp_conf)
    # (old per-user subscription loop kept for reference)
    # for user_id, feeds in subscriptions.items():
    #     user = users[user_id]
    #     feed_results = {}
    #
    #     for site in feeds:
    #         items = fetch_rss_content(site['rss_url'])
    #         feed_results[site['site_name']] = items
    #
    #     if feed_results:
    #         tpl = Template(EMAIL_TEMPLATE)
    #         html = tpl.render(nickname=user['nickname'], feeds=feed_results)
    #         send_email(smtp_conf, user['email'], "您的RSS更新摘要", html)


def send_email_worker(config, db_conf, new_papers, smtp_conf):
    """Render and send the new-paper digest email to all subscribers.

    Refreshes the top-conference deadline iCal if missing, renders the
    configured Jinja2 template per user, and afterwards marks the papers
    as sent via update_paper_status(). In debug mode the mail only goes
    to the developer's own address.
    """
    global args
    run_mode = args.mode
    mail_conf = config['email']
    template = mail_conf['template']
    title = mail_conf['title']
    save_path_ical = "data/deadlines-all.ical"
    if not os.path.exists(save_path_ical):
        update_ical_url()
    conf_list = fetch_top4_conf_countdown(save_path_ical)
    if len(new_papers) > 0:
        # Only send when there is at least one paper to push.
        user_list = fetch_users(db_conf)
        now = datetime.now()
        for user in user_list:
            if run_mode != 'debug':
                tpl = Template(template)
                html = tpl.render(truename=user['truename'], feeds=new_papers, confs=conf_list)
                send_email(smtp_conf, user['email'], f"{title}-{now.strftime('%Y-%m-%d')}", html)
            else:  # debug mode: only send to the developer's own mailbox
                # NOTE(review): hardcoded debug recipient; consider moving it to config.
                if user['email'] == 'zyguo2020@163.com':
                    tpl = Template(template)
                    # NOTE(review): this render omits confs= unlike the production
                    # branch — confirm whether that is intentional.
                    html = tpl.render(truename=user['truename'], feeds=new_papers)
                    send_email(smtp_conf, user['email'], f"{title}-{now.strftime('%Y-%m-%d')}", html)
        update_paper_status(new_papers, db_conf)


def ensure_table_exists(db_conf, table_name="rss", sql_file_path="sql/Dump20250604.sql"):
    """
    Check whether `table_name` exists in the configured schema and, if it
    does not, execute the SQL script at `sql_file_path` to create it.

    All failures are logged rather than raised, so callers are never
    interrupted by schema-bootstrap problems.
    """
    # Pre-bind so the finally block can tell whether connect() succeeded;
    # the previous unguarded close() raised UnboundLocalError (masking the
    # real error) whenever the connection could not be established.
    conn = None
    cursor = None
    try:
        conn = pymysql.connect(
            host=db_conf['ip'],
            user=db_conf['user'],
            password=db_conf['password'],
            database=db_conf['schema'],
            port=db_conf['port']
        )
        cursor = conn.cursor()

        # Restrict the lookup to our schema: without the table_schema filter
        # a same-named table in ANY database would count as existing.
        cursor.execute("""
            SELECT COUNT(*)
            FROM information_schema.tables
            WHERE table_schema = %s AND table_name = %s
        """, (db_conf['schema'], table_name))
        exists = cursor.fetchone()[0] > 0

        if exists:
            logging.info(f"表 `{table_name}` 已存在")
        else:
            logging.warning(f"表 `{table_name}` 不存在，开始执行建表 SQL...")
            with open(sql_file_path, 'r', encoding='utf-8') as f:
                sql_script = f.read()
                cursor.execute(sql_script)
                conn.commit()
            logging.info(f"表 `{table_name}` 创建成功")

    except Exception as e:
        logging.error(f"❌ 执行失败: {e}")

    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()


if __name__ == '__main__':
    # Record the process start time (Beijing timezone) for status reports.
    program_start_time = datetime.now(pytz.timezone("Asia/Shanghai")).strftime('%Y-%m-%d %H:%M:%S')
    # No `global` statement needed: this assignment is already at module scope.
    begin_reason = "调用任务启动"
    main()
