import argparse
import json
import logging
import os
import zipfile
from datetime import datetime
from logging.handlers import RotatingFileHandler
import threading
import time
import schedule

import gradio as gr
from dotenv import load_dotenv

from source.application.app import XHS

load_dotenv()
# Initialize the shared XHS client instance
xhs = XHS()
# Directory containing this file; all data/log paths are resolved relative to it.
current_dir = os.path.dirname(os.path.abspath(__file__))
log_folder = os.path.join(current_dir, 'Log')
os.makedirs(log_folder, exist_ok=True)
# Log-file name prefix for batch note-collection runs
# (presumably wraps the __deal_extract output for Gradio — see setup_logger/read_logs).
download_and_extract_log_file_prefix = "xhs批量作品采集"

# Registry of scheduled tasks, keyed by task id.
scheduled_tasks = {}
task_counter = 0

# Background thread driving the `schedule` library's job loop.
scheduler_thread = None
scheduler_running = False


def start_scheduler_thread():
    """Launch the daemon thread that fires pending scheduled jobs.

    No-op when a previous call already left the loop running.
    """
    global scheduler_thread, scheduler_running
    if scheduler_thread is not None and scheduler_running:
        return
    scheduler_running = True
    scheduler_thread = threading.Thread(target=run_scheduler, daemon=True)
    scheduler_thread.start()
    print("定时任务调度器已启动")


def run_scheduler():
    """Drive the `schedule` job queue, polling once per second.

    Exits when the module-level ``scheduler_running`` flag is cleared.
    """
    global scheduler_running
    while True:
        if not scheduler_running:
            break
        schedule.run_pending()
        time.sleep(1)


def stop_scheduler_thread():
    """Stop the scheduler loop and unregister all jobs."""
    global scheduler_running
    # Clearing the flag makes run_scheduler's loop exit on its next pass.
    scheduler_running = False
    # Drop every registered job so a later restart begins from a clean slate.
    schedule.clear()
    print("定时任务调度器已停止")


def add_scheduled_task(task_time, author_urls, recent_count):
    """Register a daily scraping job.

    :param task_time: "HH:MM" time-of-day at which the job fires every day
    :param author_urls: whitespace-separated author profile URLs to scrape
    :param recent_count: how many recent notes to collect per author
    :return: human-readable confirmation containing the new task id
    """
    global task_counter
    task_counter += 1
    task_id = f"task_{task_counter}_{int(time.time())}"

    scheduled_tasks[task_id] = {
        "time": task_time,
        "author_urls": author_urls,
        "recent_count": recent_count,
        "active": True,
    }

    # Register with the schedule library; the tag lets the job be cancelled
    # individually later.
    daily_job = schedule.every().day.at(task_time)
    daily_job.do(run_scheduled_task, task_id).tag(task_id)

    # Make sure the background loop that fires jobs is alive.
    start_scheduler_thread()

    return f"定时任务已创建，任务ID: {task_id}，执行时间: 每天 {task_time}"


# MySQL table names for the synced data, overridable via environment variables.
explore_table_name = os.getenv("EXPLORE_TABLE_NAME", "s_xhs_explore_data")

user_info_table_name = os.getenv("USER_INFO_TABLE_NAME", "s_xhs_user_info")


# Database synchronisation helpers
def sync_explore_data_to_remote(db_config):
    """Fully sync the local Download/ExploreData.db SQLite data to remote MySQL.

    :param db_config: dict with keys ``host``, ``port``, ``user``, ``password``,
        ``database`` describing the MySQL target.
    :return: None. All failures are reported on stdout instead of raising, so a
        scheduled run never kills the scheduler thread.
    """
    try:
        # Skip silently when the remote database is not (fully) configured.
        if not all([db_config["host"], db_config["user"], db_config["password"], db_config["database"]]):
            print("未配置远程数据库，跳过数据同步")
            return

        db_path = os.path.join(current_dir, 'Download', 'ExploreData.db')
        if not os.path.exists(db_path):
            print("ExploreData.db 文件不存在，跳过数据同步")
            return

        import sqlite3
        from contextlib import closing

        # `closing` guarantees the SQLite handle is released even when the
        # query raises (the original leaked the connection on error).
        with closing(sqlite3.connect(db_path)) as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT * FROM explore_data")
            rows = cursor.fetchall()
            column_names = [description[0] for description in cursor.description]

        # Push the full snapshot to MySQL.
        sync_to_mysql(db_config, column_names, rows)

    except Exception as e:
        print(f"同步数据到远程数据库时出错: {str(e)}")


# Column mapping: Chinese SQLite column name -> (english_field_name, sql_type,
# not_null, mysql_column_comment). Drives both CREATE TABLE and INSERT in
# sync_to_mysql.
FIELD_MAPPING = {
    "采集时间": ("collect_time", "TEXT", False, "采集时间"),
    "作品ID": ("note_id", "VARCHAR(191)", True, "作品ID"),
    "作品类型": ("note_type", "TEXT", False, "作品类型"),
    "作品标题": ("title", "TEXT", False, "作品标题"),
    "作品描述": ("description", "TEXT", False, "作品描述"),
    "作品标签": ("tags", "TEXT", False, "作品标签"),
    "发布时间": ("publish_time", "TEXT", False, "发布时间"),
    "最后更新时间": ("last_updated_time", "TEXT", False, "最后更新时间"),
    "收藏数量": ("collect_count", "TEXT", False, "收藏数量"),
    "评论数量": ("comment_count", "TEXT", False, "评论数量"),
    "分享数量": ("share_count", "TEXT", False, "分享数量"),
    "点赞数量": ("like_count", "TEXT", False, "点赞数量"),
    "作者昵称": ("author_nickname", "TEXT", False, "作者昵称"),
    "作者ID": ("author_id", "TEXT", False, "作者ID"),
    "作者链接": ("author_url", "TEXT", False, "作者链接"),
    "作品链接": ("note_url", "TEXT", False, "作品链接"),
    "下载地址": ("download_url", "TEXT", False, "下载地址"),
    "动图地址": ("gif_url", "TEXT", False, "动图地址"),
}


def sync_to_mysql(db_config, column_names, rows):
    """Replace the contents of the explore-data MySQL table with *rows*.

    Performs a full refresh: CREATE TABLE IF NOT EXISTS, TRUNCATE, then a bulk
    INSERT of every row. Columns are renamed via FIELD_MAPPING; columns absent
    from the mapping are dropped consistently from both the table definition
    and the inserted values (the original crashed with a KeyError on them).

    :param db_config: MySQL connection settings (host/port/user/password/database)
    :param column_names: SQLite column names, in SELECT order
    :param rows: sequence of row tuples matching *column_names*
    :return: None; errors are printed, never raised
    """
    try:
        import pymysql

        mysql_conn = pymysql.connect(
            host=db_config.get("host", "localhost"),
            port=db_config.get("port", 3306),
            user=db_config.get("user", ""),
            password=db_config.get("password", ""),
            database=db_config.get("database", ""),
            charset='utf8mb4',
            cursorclass=pymysql.cursors.DictCursor
        )

        try:
            with mysql_conn.cursor() as cursor:
                # One list drives DDL, INSERT column list and row projection,
                # so the three can never drift apart.
                mapped = [(idx, FIELD_MAPPING[col])
                          for idx, col in enumerate(column_names)
                          if col in FIELD_MAPPING]

                columns_definitions = []
                for _, (field_name, data_type, is_not_null, comment) in mapped:
                    null_clause = " NOT NULL" if is_not_null else ""
                    columns_definitions.append(
                        f"`{field_name}` {data_type}{null_clause} COMMENT '{comment}'")

                create_table_sql = f"""
                CREATE TABLE IF NOT EXISTS {explore_table_name} (
                    {", ".join(columns_definitions)}
                ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
                """
                cursor.execute(create_table_sql)

                if rows:
                    placeholders = ", ".join(["%s"] * len(mapped))
                    columns_str = ", ".join(f"`{spec[0]}`" for _, spec in mapped)
                    # Explicit full refresh: wipe the table, then bulk-insert.
                    cursor.execute(f"TRUNCATE TABLE {explore_table_name}")
                    insert_sql = (
                        f"INSERT INTO {explore_table_name} ({columns_str}) "
                        f"VALUES ({placeholders})"
                    )
                    # Project each SQLite row down to the mapped columns, in order.
                    values = [tuple(row[idx] for idx, _ in mapped) for row in rows]
                    cursor.executemany(insert_sql, values)

            # Commit once, after both DDL and the bulk insert.
            mysql_conn.commit()
            print(f"成功同步 {len(rows)} 条记录到MySQL数据库")

        finally:
            mysql_conn.close()
        print("ExploreData.db 数据已同步到远程MySQL数据库")
    except ImportError:
        print("缺少 pymysql 库，请安装: pip install pymysql")
    except Exception as e:
        print(f"同步到 MySQL 数据库时出错: {str(e)}")


def sync_user_infos_to_mysql(db_config, user_infos):
    """Append collected author profiles to the user-info MySQL table.

    Creates the table on first use, then bulk-inserts one row per profile,
    stamping each with the current time.

    :param db_config: MySQL connection settings (host/port/user/password/database)
    :param user_infos: list of dicts with keys red_id, nickname, desc, images,
        follows, fans, interaction (as produced by extract_note_urls)
    :return: None; errors are printed, never raised
    """
    try:
        import pymysql

        mysql_conn = pymysql.connect(
            host=db_config.get("host", "localhost"),
            port=db_config.get("port", 3306),
            user=db_config.get("user", ""),
            password=db_config.get("password", ""),
            database=db_config.get("database", ""),
            charset='utf8mb4',
            cursorclass=pymysql.cursors.DictCursor
        )

        try:
            with mysql_conn.cursor() as cursor:
                # (field name, column DDL) pairs keep the INSERT column list and
                # CREATE TABLE definition in sync without string-splitting DDL.
                schema = [
                    ("red_id", "`red_id` VARCHAR(191) COMMENT '小红书用户ID'"),
                    ("nickname", "`nickname` TEXT COMMENT '昵称'"),
                    ("bio", "`bio` TEXT COMMENT '个人简介'"),
                    ("images", "`images` TEXT COMMENT '头像链接'"),
                    ("follows", "`follows` INT DEFAULT 0 COMMENT '关注数'"),
                    ("fans", "`fans` INT DEFAULT 0 COMMENT '粉丝数'"),
                    ("interaction", "`interaction` INT DEFAULT 0 COMMENT '获赞与收藏'"),
                    ("created_at", "`created_at` DATETIME DEFAULT CURRENT_TIMESTAMP COMMENT '采集时间'"),
                ]

                create_table_sql = f"""
                CREATE TABLE IF NOT EXISTS {user_info_table_name} (
                    {", ".join(ddl for _, ddl in schema)}
                ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
                """
                cursor.execute(create_table_sql)

                if user_infos:
                    placeholders = ", ".join(["%s"] * len(schema))
                    columns_str = ", ".join(f"`{name}`" for name, _ in schema)

                    insert_sql = f"""
                    INSERT INTO {user_info_table_name} ({columns_str})
                    VALUES ({placeholders})
                    """

                    # Row values in schema order; the source dict key for the
                    # `bio` column is 'desc'.
                    values_list = [
                        (
                            info.get('red_id', ''),
                            info.get('nickname', ''),
                            info.get('desc', ''),
                            info.get('images', ''),
                            info.get('follows', 0),
                            info.get('fans', 0),
                            info.get('interaction', 0),
                            datetime.now(),
                        )
                        for info in user_infos
                    ]

                    cursor.executemany(insert_sql, values_list)

            mysql_conn.commit()
            print(f"成功同步 {len(user_infos)} 条用户信息到MySQL数据库")

        finally:
            mysql_conn.close()

    except ImportError:
        print("缺少 pymysql 库，请安装: pip install pymysql")
    except Exception as e:
        print(f"同步用户信息到 MySQL 数据库时出错: {str(e)}")


def run_scheduled_task(task_id):
    """Execute one registered scraping task end to end.

    Resolves the task's author pages into note URLs, syncs the author profiles
    to MySQL (when configured), scrapes the notes, then mirrors the resulting
    SQLite data to the remote database. Failures are printed, never raised.
    """
    task = scheduled_tasks.get(task_id)
    if task is None:
        return

    try:
        print(f"开始执行定时任务 {task_id}")

        # The scraping pipeline is async; wrap it so it can run from this
        # synchronous scheduler callback.
        import asyncio

        async def _job():
            note_urls_text, _, user_infos = await extract_note_urls(task["author_urls"], task["recent_count"])
            print(f'user_infos:\n{user_infos}')
            db_config = {
                "host": os.getenv("MYSQL_HOST", "localhost"),
                "port": int(os.getenv("MYSQL_PORT", 3306)),
                "user": os.getenv("MYSQL_USER", ""),
                "password": os.getenv("MYSQL_PASSWORD", ""),
                "database": os.getenv("MYSQL_DATABASE", ""),
            }

            # Push author profiles only when the DB is fully configured.
            if all([db_config["host"], db_config["user"], db_config["password"], db_config["database"]]):
                sync_user_infos_to_mysql(db_config, user_infos)

            # Scrape note data (no media download), then mirror the collected
            # SQLite data to the remote MySQL database.
            await process_notes(note_urls_text, False, True)
            sync_explore_data_to_remote(db_config)

        asyncio.run(_job())
        print(f"定时任务 {task_id} 执行完成")
    except Exception as e:
        print(f"定时任务 {task_id} 执行失败: {str(e)}")


def setup_logger(file_prefix):
    """Configure the root logger to write rotating UTF-8 log files under Log/.

    Attaches a ``write(msg, level, scroll_end)`` helper to the logger so other
    code can log through a file-like interface. Note: the handler is attached
    only once; later calls reuse the existing file even though a new timestamped
    name is computed.
    """
    timestamp_suffix = datetime.now().strftime('_%Y年%m月%d日%H时%M分%S秒.log')
    log_file_path = os.path.join(current_dir, 'Log', file_prefix + timestamp_suffix)
    logger = logging.getLogger()
    logger.setLevel(os.getenv("LOG_LEVEL", "INFO"))

    # Attach the file handler only on the first call to avoid duplicates.
    if not logger.handlers:
        # Rotate at 1 MB, keeping up to 3 backup files.
        file_handler = RotatingFileHandler(
            log_file_path, maxBytes=1024 * 1024, backupCount=3, encoding='utf-8-sig'
        )
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s - %(levelname)s - %(lineno)d - %(message)s')
        )
        logger.addHandler(file_handler)

    known_levels = (logging.DEBUG, logging.INFO, logging.WARNING,
                    logging.ERROR, logging.CRITICAL)

    def write(msg, level=logging.INFO, scroll_end=True):
        # scroll_end is accepted for interface compatibility but unused.
        # Unknown levels fall back to INFO, as before.
        logger.log(level if level in known_levels else logging.INFO, msg)

    # Expose the file-like helper on the logger instance.
    logger.write = write

    return logger


def read_download_and_extract_logs():
    """Return INFO lines from the newest batch-collection log file."""
    return read_logs(download_and_extract_log_file_prefix)


# Read the settings.json configuration file
def read_setting():
    """Load settings.json, creating it with an empty cookie when missing.

    Raises whatever json.load raises on a corrupt file, after printing
    diagnostics (path + first bytes) to stdout.
    """
    setting_path = os.path.join(current_dir, "settings.json")
    if not os.path.exists(setting_path):
        with open(setting_path, 'w', encoding='utf-8-sig') as f:
            json.dump({"cookie": ""}, f)
    try:
        with open(setting_path, 'r', encoding='utf-8-sig') as f:
            return json.load(f)
    except Exception as e:
        print(f"读取 settings.json 失败，错误：{e}")
        print(f"文件路径：{setting_path}")
        # Dump the first bytes to help diagnose encoding/BOM corruption.
        with open(setting_path, 'rb') as f:
            raw = f.read(100)
        print(f"文件前100字节内容（十六进制）：{raw.hex()}")
        raise


# Persist settings back to settings.json
def write_setting(data):
    """Write *data* to settings.json as pretty-printed JSON (UTF-8 with BOM)."""
    target = os.path.join(current_dir, "settings.json")
    with open(target, 'w', encoding='utf-8-sig') as f:
        json.dump(data, f, indent=4, ensure_ascii=False)


def read_logs(file_prefix=""):
    """Return the INFO lines of the newest log file matching *file_prefix*.

    :param file_prefix: filename prefix used to filter the Log directory
    :return: newline-joined INFO-level log lines, or a Chinese status message
        when no log exists / the file is missing / reading fails
    """
    try:
        log_files = [f for f in os.listdir(log_folder) if f.startswith(file_prefix) and f.endswith('.log')]
        if not log_files:
            return "无采集日志"

        # Pick the newest file by the timestamp embedded in its name.
        name_format = file_prefix + "_%Y年%m月%d日%H时%M分%S秒.log"
        latest_log_file = None
        latest_time = None
        for name in log_files:
            try:
                file_time = datetime.strptime(name, name_format)
            except ValueError:
                continue  # skip files that don't follow the naming scheme
            if latest_time is None or file_time > latest_time:
                latest_time = file_time
                latest_log_file = name

        # Bug fix: previously a None filename reached os.path.join and raised
        # TypeError when no file matched the timestamp format.
        if latest_log_file is None:
            return "无采集日志"

        latest_log_file_path = os.path.join(log_folder, latest_log_file)
        if os.path.exists(latest_log_file_path):
            with open(latest_log_file_path, 'r', encoding='utf-8-sig') as f:
                content = f.read()
            if content:
                info_logs = [line for line in content.splitlines() if 'INFO' in line]
                return '\n'.join(info_logs)
        return "日志文件不存在"
    except Exception as e:
        return f"读取日志文件失败: {str(e)}"


async def process_url(url: str, download: bool = False, data: bool = True):
    """Extract (and optionally download) a single note URL with a fresh XHS client.

    :param url: note URL; falsy input is a no-op returning None
    :param download: whether to download the note's media files
    :param data: whether to collect the note's metadata
    :return: the extraction result (printed as well), or None for empty input
    """
    logger = setup_logger(download_and_extract_log_file_prefix)
    if not url:
        return
    # Build a client from the current settings for each call.
    from source import Settings
    client = XHS(**Settings().run())

    async with client:
        result = await client.extract(url, download=download, data=data, log=logger, bar=None)

    print(result)
    return result


def refresh_zip_files():
    """
    Return the paths of all .zip archives in the configured ZIP directory.

    The directory (env var ZIP_DIR, default "zips") is created on first use.
    :return: list of .zip file paths (may be empty)
    """
    zip_dir = os.getenv("ZIP_DIR", "zips")
    zip_path = os.path.join(current_dir, zip_dir)
    # exist_ok makes the previous os.path.exists pre-check redundant.
    os.makedirs(zip_path, exist_ok=True)
    return [os.path.join(zip_path, f) for f in os.listdir(zip_path) if f.endswith('.zip')]


def download_folder_or_files(paths):
    """
    Pack the selected files into a timestamped .zip archive for download.

    :param paths: list of selected file paths; directories are skipped
        (only plain files are added, under their bare file name)
    :return: path of the created .zip file, or None when nothing was selected
        or the archive could not be written (best-effort contract: the caller
        simply gets no download link)
    """
    if not paths:
        return None

    # Archive name: single selection keeps the file's name, multiple
    # selections get a generic "多文件" prefix; both carry a timestamp.
    zip_dir = os.getenv("ZIP_DIR", "zips")
    timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
    if len(paths) > 1:
        zip_filename = f"多文件_{timestamp}.zip"
    else:
        zip_filename = f"{os.path.basename(paths[0])}_{timestamp}.zip"
    zip_path = os.path.join(current_dir, zip_dir, zip_filename)
    os.makedirs(os.path.dirname(zip_path), exist_ok=True)

    try:
        with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
            for path in paths:
                if os.path.isfile(path):
                    zipf.write(path, os.path.basename(path))
        return zip_path
    except Exception:
        # Swallow errors deliberately; see the docstring's contract.
        return None


def generateNoteUrls(data):
    """Build share URLs for (note_id, xsec_token) pairs.

    :param data: iterable of two-element lists/tuples ``(note_id, token)``
    :raises ValueError: when any item is not a pair
    :return: list of xiaohongshu.com discovery URLs
    """
    # Validate everything up front so a bad item raises before any URL is built.
    for item in data:
        if not (isinstance(item, (list, tuple)) and len(item) == 2):
            raise ValueError("Each item in data must be a list or tuple of length 2")
    urls = []
    for note_id, token in data:
        urls.append(
            "https://www.xiaohongshu.com/discovery/item/"
            f"{note_id}?source=webshare&xhsshare=pc_web&xsec_token={token}&xsec_source=pc_share"
        )
    return urls


async def process_notes(note_urls_text: str, download: bool, data: bool):
    """Collect every whitespace-separated note URL in *note_urls_text*.

    :param note_urls_text: whitespace-separated note URLs
    :param download: forwarded to process_url (download media files)
    :param data: forwarded to process_url (collect note metadata)
    :return: list of one-entry dicts mapping each URL to its result (or an
        error string), or an error dict when the input is blank
    """
    if not note_urls_text.strip():
        return {"error": "没有作品链接可采集"}

    outcomes = []
    for note_url in note_urls_text.split():
        try:
            outcome = await process_url(note_url, download, data)
        except Exception as exc:
            outcomes.append({note_url: f"处理出错: {str(exc)}"})
        else:
            outcomes.append({note_url: outcome})
    return outcomes


# def sync_process_notes(note_urls_text, download=True, data=True):
#     """同步包装的process_notes函数"""
#     import asyncio
#     return asyncio.run(process_notes(note_urls_text, download, data))


async def extract_note_urls(author_urls: str, recent_count: int = 10):
    """Resolve author-profile and note links into a flat list of note URLs.

    Author profile pages are scraped with a headless Chromium (Playwright),
    reading the page's ``window.__INITIAL_STATE__`` for both the author's
    basic info and their note list. Direct note links are passed through.

    :param author_urls: whitespace-separated mix of author profile URLs and
        direct note / xhslink.com URLs
    :param recent_count: how many recent notes (pinned included) to take from
        each author profile
    :return: tuple of (space-joined URL string, or a Chinese error message when
        nothing was found; list of note URLs; list of author-info dicts)
    """
    urls = author_urls.split()
    all_note_urls = []

    # Collected author profile dicts (nickname, fans, ...), one per profile URL.
    user_infos = []

    # Split the input into author home pages and direct note links.
    author_profile_urls = [url for url in urls if "user/profile" in url]
    direct_note_urls = [url for url in urls if "discovery/item" in url or "xhslink.com" in url]

    # Direct note links need no scraping.
    all_note_urls.extend(direct_note_urls)

    if author_profile_urls:
        from playwright.async_api import async_playwright
        async with async_playwright() as p:
            browser = await p.chromium.launch(headless=True)
            context = await browser.new_context()

            # Attach the stored cookie so logged-in data is visible.
            cookie_str = read_setting().get("cookie", "")
            if cookie_str:
                # Parse the "k=v; k2=v2" cookie string into a dict.
                # NOTE(review): a fragment without '=' would raise ValueError
                # here and abort the whole extraction — confirm upstream format.
                cookies_dict = {}
                for cookie in cookie_str.split('; '):
                    key, value = cookie.split('=', 1)
                    cookies_dict[key] = value

                # Convert to Playwright cookie objects scoped to xiaohongshu.com.
                cookies = [
                    {"name": name, "value": value, "domain": ".xiaohongshu.com", "path": "/"}
                    for name, value in cookies_dict.items()
                ]
                await context.add_cookies(cookies)

            page = await context.new_page()
            for url in author_profile_urls:
                try:
                    await page.goto(url, timeout=30000)
                    await page.wait_for_timeout(5000)  # wait for the page to render

                    # Pull the author's basic info from the page's initial state.
                    user_data = await page.evaluate('() => window.__INITIAL_STATE__?.user.userPageData._rawValue')
                    user_info = {}
                    if user_data:
                        basicInfo = user_data.get('basicInfo', {})
                        # Profile fields.
                        user_info['desc'] = basicInfo.get('desc', '')
                        user_info['images'] = basicInfo.get('images', '')
                        user_info['nickname'] = basicInfo.get('nickname', '')
                        user_info['red_id'] = basicInfo.get('redId', '')

                        # Interaction counters (follows / fans / likes-and-collects).
                        interactions = user_data.get('interactions', [])
                        user_info['follows'] = next(
                            (item['count'] for item in interactions if item['type'] == 'follows'), 0)
                        user_info['fans'] = next((item['count'] for item in interactions if item['type'] == 'fans'), 0)
                        user_info['interaction'] = next(
                            (item['count'] for item in interactions if item['type'] == 'interaction'), 0)
                        print(f"user_info：\n{user_info}")
                        # An empty nickname is treated as "profile not loaded".
                        if user_info['nickname'] != "":
                            user_infos.append(user_info)
                    # Grab the author's note list from the initial state.
                    notes_raw_value = await page.evaluate(
                        '() => window.__INITIAL_STATE__?.user.notes._rawValue')

                    # Guard against missing or empty note data.
                    if not notes_raw_value or len(notes_raw_value) == 0 or len(notes_raw_value[0]) == 0:
                        print(f"无法找到有效的笔记数据: {url}")
                        continue

                    # Clamp recent_count to the number of notes actually present.
                    actual_count = min(int(recent_count), len(notes_raw_value[0]))
                    for note in notes_raw_value[0][:actual_count]:
                        if 'id' not in note or 'xsecToken' not in note:
                            print(f"笔记数据结构不完整: {url}")
                            continue

                        note_id = note['id']
                        token = note['xsecToken']
                        note_url = generateNoteUrls([[note_id, token]])[0]
                        all_note_urls.append(note_url)

                except Exception as e:
                    # Keep going on a per-profile failure.
                    print(f"处理页面时出错: {url}, 错误: {str(e)}")
                    continue

            await browser.close()

    note_urls_text = " ".join(all_note_urls)
    if not all_note_urls:
        note_urls_text = "没有找到任何作品链接,Cookie可能已经失效"
    return note_urls_text, all_note_urls, user_infos


# 创建 Gradio 接口
def gradio_interface():
    # 定义输入字段
    with gr.Blocks(title="XHS") as demo:
        gr.Markdown("# XHS Downloader WebUI")
        gr.Markdown("""如遇到错误日志： \"响应内容不是有效的 JSON 数据，请尝试更新 Cookie！\"
        - 此时请更新Cookie
        - 如更新后任然无法执行，说明账号被平台风控，此时可以依次尝试下面2种方法：
        1. 退出XHS账号，刷新浏览器，再次获取Cookie
        2. 换一个的账号登陆，重新获取Cookie
        """)
        with gr.Tab("设置"):
            gr.Markdown("### 设置 Cookie")
            cookie_input = gr.Textbox(
                label="Cookie",
                value=read_setting().get("cookie", ""),
                lines=5,
                placeholder="请输入你的 Cookie"
            )

            # 读取.env中的PROXY_XHS
            proxy_from_env = os.getenv("PROXY_XHS", "")
            proxy_input = gr.Textbox(
                label="代理地址",
                value=read_setting().get("proxy", "") or proxy_from_env,
                lines=1,
                placeholder="请输入代理地址，例如：http://127.0.0.1:10808"
            )

            def save_settings(cookie, proxy):
                """Persist the cookie (always) and proxy (only when non-empty)."""
                stored = read_setting()
                stored["cookie"] = cookie
                # A blank proxy field must not wipe a previously saved value.
                if proxy:
                    stored["proxy"] = proxy
                write_setting(stored)
                return "设置已保存！"

            # save_button = gr.Button("保存 Cookie")
            save_button = gr.Button("保存设置")
            save_output = gr.Textbox(label="状态", interactive=False)

            save_button.click(
                fn=save_settings,
                inputs=[cookie_input, proxy_input],
                outputs=save_output
            )
        with gr.Tab("小红书作品数据采集"):
            gr.Markdown("""
            ### 使用说明
            1. 可输入小红书作者主页链接获取其作品清单（支持多个链接，用空格分隔）
            2. 也可直接输入作品链接进行采集（支持多个链接，用空格分隔）
            3. 选择是否下载作品文件和保存数据
            """)

            with gr.Row():
                with gr.Column():
                    url_input = gr.Textbox(
                        label="输入作者主页链接或作品链接（支持多个，用空格分隔）",
                        lines=5,
                        placeholder="https://www.xiaohongshu.com/user/profile/xxx 或 https://www.xiaohongshu.com/discovery/item/xxx"
                    )
                    with gr.Row():
                        download_checkbox = gr.Checkbox(label="下载作品文件", value=True)
                        data_checkbox = gr.Checkbox(label="获取作品数据", value=True)
                        recent_nums = gr.Number(label="获取最近作品链接（包含置顶）", value=10, precision=0)

                    submit_button = gr.Button("获取作品链接")
                    process_button = gr.Button("采集作品数据")

                with gr.Column():
                    note_urls_output = gr.Textbox(label="作品链接清单", lines=10, max_lines=15, interactive=False)
                    process_output = gr.JSON(label="采集结果")

            log_output = gr.Textbox(label="采集日志", value=read_download_and_extract_logs, lines=8,
                                    max_lines=10, every=5)

            # 存储获取到的作品链接
            note_urls_state = gr.State()

            # 绑定按钮点击事件
            submit_button.click(
                fn=extract_note_urls,
                inputs=[url_input, recent_nums],
                outputs=[note_urls_output, note_urls_state, ]
            )

            process_button.click(
                fn=process_notes,
                inputs=[note_urls_output, download_checkbox, data_checkbox],
                outputs=process_output
            )

        with gr.Tab("历史记录"):
            gr.Markdown("### 查看历史记录\n支持单个文件夹或多个文件压缩后下载。")
            with gr.Row():
                with gr.Column():
                    file_explorer = gr.FileExplorer(
                        label="任务文件夹",
                        glob="**/*",
                        root_dir=os.path.join(current_dir, os.getenv("ROOT")),
                        every=1,
                        height=300,
                    )
                    refresh_btn = gr.Button("刷新")
                    clear_btn = gr.Button("清除所有文件")

                    def update_file_explorer():
                        # Temporarily point the explorer at an empty root to force a reload.
                        return gr.FileExplorer(root_dir="")

                    def update_file_explorer_2():
                        # Restore the real root directory (env var ROOT) after the reset.
                        return gr.FileExplorer(root_dir=os.path.join(current_dir, os.getenv("ROOT")))

                    def clear_all_files():
                        """Delete every file/folder under the ROOT, ZIP and Log directories.

                        Log files may still be held open by the active logger, so
                        PermissionError on file removal is ignored for the Log
                        directory only (matching the original per-directory behavior).
                        :return: status message
                        """
                        import shutil

                        def _purge(directory, ignore_permission_errors=False):
                            # Remove all entries of *directory*: files and whole subtrees.
                            if not os.path.exists(directory):
                                return
                            for entry in os.listdir(directory):
                                entry_path = os.path.join(directory, entry)
                                if os.path.isfile(entry_path):
                                    try:
                                        os.remove(entry_path)
                                    except PermissionError:
                                        if not ignore_permission_errors:
                                            raise
                                elif os.path.isdir(entry_path):
                                    shutil.rmtree(entry_path)

                        # Task output directory (env var ROOT).
                        _purge(os.path.join(current_dir, os.getenv("ROOT")))
                        # Generated ZIP archives.
                        _purge(os.path.join(current_dir, os.getenv("ZIP_DIR", "zips")))
                        # Log files can be locked by the logger; skip those quietly.
                        _purge(os.path.join(current_dir, "Log"), ignore_permission_errors=True)

                        return "所有文件已清除"

                    refresh_btn.click(update_file_explorer, outputs=file_explorer).then(update_file_explorer_2,
                                                                                        outputs=file_explorer)

                    clear_output = gr.Textbox(label="清除状态", interactive=False)
                    clear_btn.click(clear_all_files, outputs=clear_output)

                download_output = gr.File(label="ZIP下载链接",
                                          value=refresh_zip_files,
                                          height=100,
                                          every=10)
            download_button = gr.Button("ZIP压缩")
            download_button.click(
                fn=download_folder_or_files,
                inputs=file_explorer,
                outputs=download_output
            )

        # Scheduled-task tab: admin-gated UI for creating daily author-page collection jobs.
        with gr.Tab("定时任务") as schedule_tab:
            # Password gate. NOTE(review): pwd_visibility is created but never
            # read or written below — looks vestigial; confirm before removing.
            pwd_visibility = gr.State(False)

            # Password prompt, shown whenever the tab is opened (see schedule_tab.select below).
            with gr.Group(visible=False) as pwd_container:
                admin_pwd = gr.Textbox(
                    label="管理员密码",
                    placeholder="请输入管理员密码以访问定时任务功能",
                    type="password"
                )
                pwd_button = gr.Button("验证")

            # Main task panel, revealed only after successful password verification.
            with gr.Group(visible=False) as task_container:
                gr.Markdown("### 定时获取用户主页内容")
                gr.Markdown(""" 
                ### 使用说明
                1. 填写用户的主页链接
                2. 设置获取最近多少条内容
                3. 设置执行时间
                4. 点击按钮创建定时任务
                """)

                with gr.Row():
                    with gr.Column():
                        # How many recent posts to pull for each author.
                        schedule_recent_count = gr.Number(label="获取最近作品链接数量", value=10, precision=0)
                        # author_urls: author profile URLs; multiple entries are separated by spaces.
                        schedule_author_urls = gr.Textbox(
                            label="输入作者主页链接（支持多个，用空格分隔）",
                            lines=5,
                            placeholder="https://www.xiaohongshu.com/user/profile/xxx"
                        )
                        schedule_time = gr.Textbox(
                            label="执行时间 (HH:MM格式，例如 14:30)",
                            placeholder="请输入执行时间，格式如 14:30"
                        )
                        schedule_button = gr.Button("创建定时任务")

                    with gr.Column():
                        # Read-only status box; schedule_state is a dummy second output.
                        schedule_output = gr.Textbox(label="执行结果", lines=10, max_lines=15, interactive=False)
                        schedule_state = gr.State()

                def create_scheduled_task(author_urls, recent_count, exec_time):
                    """Validate form inputs and register a daily scheduled task.

                    Returns a (status message, state) tuple matching the Gradio
                    outputs [schedule_output, schedule_state].
                    """
                    if not exec_time:
                        return "请设置执行时间", None
                    # Reject anything that is not a valid HH:MM wall-clock time.
                    try:
                        time.strptime(exec_time, "%H:%M")
                    except ValueError:
                        return "时间格式错误，请使用 HH:MM 格式，例如 14:30", None
                    # An empty URL box would schedule a task that does nothing.
                    if not author_urls or not author_urls.strip():
                        return "请输入作者主页链接", None
                    # gr.Number may deliver None (cleared field) or a float;
                    # the original int(recent_count) crashed on None.
                    try:
                        count = int(recent_count)
                    except (TypeError, ValueError):
                        return "获取数量必须是正整数", None
                    if count <= 0:
                        return "获取数量必须是正整数", None

                    result = add_scheduled_task(exec_time, author_urls, count)
                    return result, None

                # Wire the create button to the validator/scheduler defined above.
                schedule_button.click(
                    fn=create_scheduled_task,
                    inputs=[schedule_author_urls, schedule_recent_count, schedule_time],
                    outputs=[schedule_output, schedule_state]
                )

                # Controls for viewing and deleting existing scheduled tasks.
                gr.Markdown("### 管理定时任务")

                with gr.Row():
                    with gr.Column():
                        refresh_tasks_button = gr.Button("刷新任务列表")
                        delete_task_id = gr.Textbox(
                            label="要删除的任务ID",
                            placeholder="请输入要删除的任务ID"
                        )
                        delete_task_button = gr.Button("删除任务")

                    with gr.Column():
                        # Read-only dump of the in-memory task registry.
                        tasks_list = gr.Textbox(
                            label="当前定时任务列表",
                            lines=10,
                            max_lines=15,
                            interactive=False
                        )

                def list_scheduled_tasks():
                    """Render the module-level task registry as display text."""
                    if not scheduled_tasks:
                        return "暂无定时任务"

                    # Build the report line by line, then join once at the end.
                    lines = ["当前定时任务列表:\n"]
                    for tid, info in scheduled_tasks.items():
                        lines.append(f"任务ID: {tid}")
                        lines.append(f"执行时间: 每天 {info['time']}")
                        lines.append(f"作者链接: {info['author_urls']}")
                        lines.append(f"获取数量: {info['recent_count']}")
                        lines.append(f"状态: {'启用' if info['active'] else '禁用'}")
                        lines.append("-" * 40)
                    return "\n".join(lines) + "\n"

                def delete_scheduled_task(task_id):
                    """Remove a task from the registry and cancel its schedule job."""
                    if not task_id:
                        return "请输入要删除的任务ID"
                    if task_id not in scheduled_tasks:
                        return f"未找到任务ID为 {task_id} 的任务"

                    # Cancel the (single) schedule job tagged with this task id.
                    for job in schedule.get_jobs():
                        if task_id in job.tags:
                            schedule.cancel_job(job)
                            break
                    scheduled_tasks.pop(task_id)
                    return f"任务 {task_id} 已成功删除"

                # Refresh button re-renders the registry into the read-only list box.
                refresh_tasks_button.click(
                    fn=list_scheduled_tasks,
                    outputs=tasks_list
                )

                # Deletion results are shown in the shared schedule_output box.
                delete_task_button.click(
                    fn=delete_scheduled_task,
                    inputs=delete_task_id,
                    outputs=schedule_output
                )

            def verify_password(password):
                """Gate the scheduled-task panel behind an admin password.

                On success, hide the prompt and reveal the task panel; on
                failure, surface a Gradio error toast.
                """
                # The password comes from the ADMIN_PASSWORD env var and falls
                # back to the historical hard-coded default for compatibility.
                # NOTE(review): the "admin" default is weak — set ADMIN_PASSWORD
                # in the environment for any real deployment.
                if password == os.getenv("ADMIN_PASSWORD", "admin"):
                    return {pwd_container: gr.update(visible=False), task_container: gr.update(visible=True)}
                raise gr.Error("密码错误，请输入正确的管理员密码")

            # Wire the verify button: success swaps the prompt for the task panel.
            pwd_button.click(
                fn=verify_password,
                inputs=admin_pwd,
                outputs=[pwd_container, task_container]
            )

            # Each time the tab is selected, re-show the password prompt and
            # hide the task panel so access is re-verified per visit.
            def show_password_prompt():
                return {pwd_container: gr.update(visible=True), task_container: gr.update(visible=False)}

            schedule_tab.select(
                fn=show_password_prompt,
                outputs=[pwd_container, task_container]
            )

    return demo


# Start the Gradio web service.
if __name__ == "__main__":
    app = gradio_interface()
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, default=7877, help='Gradio 应用监听的端口号')
    args = parser.parse_args()

    # Launch options shared by both deployment targets; the 'server' target
    # additionally binds on all interfaces.
    launch_kwargs = dict(
        share=False,
        ssl_verify=False,
        ssl_certfile="cert.pem",
        ssl_keyfile="key.pem",
        allowed_paths=[os.getenv("ROOT"), os.getenv("ZIP_DIR"), 'Download', "Log"],
        server_port=args.port,
        favicon_path="favicon.ico",
        root_path="/xhs-plugin",
    )
    platform = os.getenv('PLATFORM', '')
    try:
        if platform == 'local':
            app.launch(**launch_kwargs)
        elif platform == 'server':
            app.launch(server_name="0.0.0.0", **launch_kwargs)
        else:
            # Previously an unknown/unset PLATFORM exited silently; make the
            # misconfiguration visible instead.
            print(f"未知的 PLATFORM 值: {platform!r}，请将环境变量 PLATFORM 设置为 'local' 或 'server'")
    finally:
        # Always stop the background scheduler, even if launch fails.
        stop_scheduler_thread()