"""
简化版监控任务 - 避免Celery内部错误
"""

import asyncio
import json
import time
import threading
import hashlib
import random
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional
from sqlalchemy.orm import Session

# 添加日期解析库
try:
    import dateutil.parser
except ImportError:
    # 如果没有安装dateutil，提供一个简单的fallback
    dateutil = None

from app.celery_app import celery_app
from app.database import SessionLocal
from app.models import Monitor, User
from app.models.monitor import MonitorStatus, MonitorType
from app.utils.redis_client import get_redis_client
from app.utils.logger import get_monitor_logger, log_function_call, log_async_function_call
from app.utils.mercari_client import MercariAPI, search_mercari_products, get_mercari_seller_products, get_mercari_shop_products
from app.services.notification_service import NotificationService
from app.tasks.notification_tasks import send_notification_with_log
from app.config import settings
from app.utils.cache_manager import cache_manager, cache_result

# 获取监控专用日志器
monitor_logger = get_monitor_logger()

# 基于Redis的调度辅助函数，避免DB写放大并提升节流准确性
def _last_run_key(monitor_id: int) -> str:
    return f"monitor_last_run_at:{monitor_id}"


def _next_schedule_key(monitor_id: int) -> str:
    return f"monitor_next_schedule_at:{monitor_id}"


def _redis_get_dt(key: str):
    """Read an ISO-formatted timestamp from Redis.

    Args:
        key: Redis key to read.

    Returns:
        datetime parsed from the stored ISO string, or None when the key is
        missing, unreadable, or the value is not a valid timestamp.
    """
    try:
        rw = get_redis_client()
        value = rw.get(key)
        if not value:
            return None
        # BUGFIX: redis clients may return bytes; str(bytes) would produce
        # "b'...'" which always fails ISO parsing. Decode explicitly instead.
        if isinstance(value, bytes):
            s = value.decode("utf-8", errors="ignore")
        elif isinstance(value, str):
            s = value
        else:
            s = str(value)
        try:
            if dateutil:
                return dateutil.parser.isoparse(s)
            return datetime.fromisoformat(s)
        except Exception:
            return None
    except Exception:
        # Best-effort read: Redis being unavailable must not break scheduling.
        return None


def _redis_set_dt(key: str, dt: datetime, ex: int = 86400):
    """Store *dt* in Redis under *key* as an ISO string (default TTL: 1 day)."""
    try:
        client = get_redis_client()
        client.set(key, dt.isoformat(), ex=ex)
    except Exception:
        # Best-effort write; scheduling falls back to DB timestamps.
        pass

# 北京时间函数
def get_beijing_now():
    """Return the current time shifted to Beijing time (UTC+8).

    Note: the result is a naive datetime on purpose — the rest of this module
    (and the DB) compares against naive Beijing-time values.
    """
    return timedelta(hours=8) + datetime.utcnow()


# 优化的商品缓存函数
def mark_product_seen_optimized(monitor_id: int, product_id: str, expires_at: datetime, product_created_time: Optional[str] = None) -> None:
    """
    Optimized product de-duplication cache using a hybrid strategy:
    a bounded entry count plus a sliding time window.

    Args:
        monitor_id: Monitor task ID.
        product_id: Product ID.
        expires_at: Monitor expiry time (naive Beijing-time datetime).
        product_created_time: Product creation time (ISO string, optional).
    """
    redis_wrapper = get_redis_client()
    redis_client = redis_wrapper.client  # underlying raw Redis client
    cache_key = f"product_seen:{monitor_id}"
    current_time = time.time()

    # Sorted set keyed by product id, scored by "first seen" timestamp.
    redis_client.zadd(cache_key, {product_id: current_time})

    # Optionally track the product's own creation time in a parallel zset.
    if product_created_time:
        created_cache_key = f"product_created:{monitor_id}"
        try:
            # Parse the product creation time.
            import dateutil.parser
            created_dt = dateutil.parser.parse(product_created_time)
            created_timestamp = created_dt.timestamp()
            redis_client.zadd(created_cache_key, {product_id: created_timestamp})

            # Expire the creation-time cache together with the monitor.
            if expires_at:
                current_beijing_time = get_beijing_now()
                if expires_at > current_beijing_time:
                    ttl_seconds = int((expires_at - current_beijing_time).total_seconds())
                    redis_client.expire(created_cache_key, ttl_seconds)
        except Exception as e:
            # BUGFIX: the original comment promised a warning but swallowed
            # the error silently; log it without breaking the main flow.
            monitor_logger.warning(
                f"⚠️ 解析商品创建时间失败: monitor={monitor_id}, product={product_id}, error={e}"
            )

    # Cleanup strategy 1: keep only the newest N entries
    # (cache size raised to cope with delisted products).
    cache_size = NEW_PRODUCT_CONFIG["cache_size"]
    redis_client.zremrangebyrank(cache_key, 0, -(cache_size + 1))  # keep the last N

    # Cleanup strategy 2: drop entries older than the time window.
    time_window_hours = NEW_PRODUCT_CONFIG["time_window_hours"]
    cutoff_time = current_time - time_window_hours * 3600
    redis_client.zremrangebyscore(cache_key, 0, cutoff_time)

    # Align the seen-cache TTL with the monitor's expiry.
    if expires_at:
        current_beijing_time = get_beijing_now()
        if expires_at > current_beijing_time:
            ttl_seconds = int((expires_at - current_beijing_time).total_seconds())
            redis_client.expire(cache_key, ttl_seconds)
        else:
            # Monitor already expired: keep the key only briefly.
            redis_client.expire(cache_key, 60)


def has_seen_product_optimized(monitor_id: int, product_id: str) -> bool:
    """Return True when the product already exists in the monitor's seen-cache."""
    client = get_redis_client().client  # underlying raw Redis client
    score = client.zscore(f"product_seen:{monitor_id}", product_id)
    return score is not None


def is_truly_new_product(monitor_id: int, product_id: str, product_created_time: Optional[str] = None, check_window_hours: Optional[int] = None) -> bool:
    """
    Decide whether a product counts as genuinely new.

    Args:
        monitor_id: Monitor ID.
        product_id: Product ID.
        product_created_time: Product creation time (ISO string).
        check_window_hours: Freshness window in hours (overrides config).

    Returns:
        bool: True when the product should be treated as new.
    """
    # Step 1: anything already in the seen-cache is not new.
    if has_seen_product_optimized(monitor_id, product_id):
        return False

    # Step 2: optional creation-time freshness check.
    if product_created_time and NEW_PRODUCT_CONFIG["enable_time_based_check"]:
        try:
            from datetime import timezone
            import dateutil.parser

            created_dt = dateutil.parser.parse(product_created_time)
            if created_dt.tzinfo is None:
                # No zone info on the timestamp — assume UTC.
                created_dt = created_dt.replace(tzinfo=timezone.utc)

            age_hours = (datetime.now(timezone.utc) - created_dt).total_seconds() / 3600
            window = check_window_hours or NEW_PRODUCT_CONFIG["new_product_window_hours"]

            # Listed longer ago than the window: not a fresh listing.
            if age_hours > window:
                return False

        except Exception:
            # Unparseable timestamp: fall back to the cache-only verdict.
            pass

    return True


# Configuration for new-product detection (consumed by
# mark_product_seen_optimized / is_truly_new_product)
NEW_PRODUCT_CONFIG = {
    "cache_size": 30,  # number of products kept in the seen-cache (raised from 10 to 30)
    "time_window_hours": 24,  # sliding time window for the seen-cache (hours)
    "new_product_window_hours": 2,  # freshness window for "truly new" products (hours)
    "enable_time_based_check": True,  # enable the creation-time based freshness check
}

# Tiered scheduling configuration, keyed by the *actual* execution interval
# in seconds (see map_db_interval_to_actual_interval).
# NOTE(review): the original header said "four-tier" but seven tiers are
# defined below — the comment was stale, not the data.
TIER_CONFIG = {
    25: {
        "name": "extended",
        "batch_size": 100,
        "timeout": 10,  # seconds
        "description": "扩展层 - 独立任务模式"
    },
    20: {
        # NOTE(review): description duplicates the 25s tier; presumably it
        # should describe the "enhanced" tier — confirm with the author.
        "name": "enhanced",
        "batch_size": 100,
        "timeout": 10,
        "description": "扩展层 - 独立任务模式"
    },
    15: {
        "name": "basic",
        "batch_size": 100,
        "timeout": 12,
        "description": "基础层 - 大批量处理"
    },
    10: {
        "name": "advanced",
        "batch_size": 50,
        "timeout": 8,
        "description": "进阶层 - 中批量专用"
    },
    5: {
        "name": "premium",
        "batch_size": 20,
        "timeout": 4,
        "description": "高级层 - 小批量快速"
    },
    3: {
        "name": "ultra",
        "batch_size": 1,
        "timeout": 2,
        "description": "极速层 - 独立任务"
    },
    1: {
        "name": "ultra_plus",
        "batch_size": 1,
        "timeout": 1,
        "description": "超极速层 - 独立任务"
    }
}


def map_db_interval_to_actual_interval(db_interval: int) -> int:
    """
    Translate the check_interval stored in the database into the real
    execution interval used by the scheduler.

    Args:
        db_interval: check_interval value persisted in the database.

    Returns:
        int: actual execution interval in seconds; unknown tiers pass
        through unchanged.
    """
    # DB tier -> actual cadence:
    #   15s -> 25s, 10s -> 20s, 5s -> 15s,
    #   3s -> 5s and 1s -> 3s (both served by ultra_precision_monitor)
    mapping = {15: 25, 10: 20, 5: 15, 3: 5, 1: 3}
    return mapping.get(db_interval, db_interval)


@celery_app.task(name="simple_check_monitors")
@log_function_call("monitor")
def simple_check_monitors() -> Dict[str, Any]:
    """
    Six-tier scheduling entry point: partitions active monitors by their
    check_interval and dispatches each tier.

    Returns:
        Dict[str, Any]: run summary (counts, per-monitor results, timestamp),
        or an error payload when the whole run fails.
    """

    start_time = get_beijing_now()
    monitor_logger.info("🔍 开始六档调度监控检查")

    db = None
    try:
        db = SessionLocal()

        # Fetch every active monitor.
        active_monitors = db.query(Monitor).filter(
            Monitor.status == MonitorStatus.ACTIVE,
            Monitor.is_active == True
        ).all()

        # Filter out expired monitors. The DB stores Beijing time, so we
        # compare against Beijing time as well.
        current_time = get_beijing_now()
        valid_monitors = []
        expired_count = 0

        for monitor in active_monitors:
            # Default expiry: duration_hours (or 24h) after creation.
            if not monitor.expires_at:
                monitor.expires_at = monitor.created_at + timedelta(hours=monitor.duration_hours or 24)
                db.commit()

            # Keep only monitors that have not expired yet.
            if monitor.expires_at > current_time:
                valid_monitors.append(monitor)
            else:
                expired_count += 1

        active_monitors = valid_monitors
        if len(active_monitors) > 0 or expired_count > 0:
            monitor_logger.info(f"📊 有效监控: {len(active_monitors)} 个，已过期: {expired_count} 个")

        # Process per tier, mapping the DB interval to the actual cadence:
        # 15s -> 25s, 10s -> 20s, 5s -> 15s,
        # 3s -> 5s and 1s -> 3s (both via ultra_precision_monitor).
        all_results = []
        total_processed = 0
        total_skipped = 0

        for db_interval in [15, 10, 5, 3, 1]:
            tier_monitors = [m for m in active_monitors if getattr(m, 'check_interval', 15) == db_interval]
            actual_interval = map_db_interval_to_actual_interval(db_interval)

            if tier_monitors:
                config = TIER_CONFIG[actual_interval]
                monitor_logger.info(f"🎯 {db_interval}秒档(实际{actual_interval}秒): {len(tier_monitors)} 个监控")

                tier_results, processed, skipped = process_tier_monitors(tier_monitors, actual_interval, config, db)
                all_results.extend(tier_results)
                total_processed += processed
                total_skipped += skipped

        # Aggregate run statistics.
        end_time = get_beijing_now()
        duration = (end_time - start_time).total_seconds()
        success_count = sum(1 for r in all_results if r.get("status") in ["success", "scheduled"])
        error_count = sum(1 for r in all_results if r.get("status") == "error")

        if total_processed > 0:
            monitor_logger.info(f"✅ 完成: 处理{total_processed}, 成功{success_count}, 失败{error_count}, 耗时{duration:.1f}s")

        return {
            "status": "completed",
            "total_monitors": len(active_monitors),
            "processed": total_processed,
            "skipped": total_skipped,
            "success": success_count,
            "errors": error_count,
            "duration_seconds": duration,
            "results": all_results,
            "timestamp": get_beijing_now().isoformat()
        }

    except Exception as e:
        error_msg = str(e)
        monitor_logger.error(f"❌ 六档调度失败: {error_msg}")
        return {
            "status": "error",
            "error": error_msg,
            "timestamp": get_beijing_now().isoformat()
        }
    finally:
        # BUGFIX: the session was previously closed only on the success path;
        # always release it so exceptions don't leak DB connections.
        if db is not None:
            db.close()


def process_tier_monitors(monitors: List[Monitor], interval: int, config: Dict, db: Session) -> tuple:
    """
    Handle the monitors belonging to one scheduling tier.

    Args:
        monitors: monitors in this tier.
        interval: actual execution interval in seconds.
        config: tier configuration entry from TIER_CONFIG.
        db: database session.

    Returns:
        tuple: (results, processed_count, skipped_count)
    """
    # Keep only monitors that are actually due; count the rest as skipped.
    due = [m for m in monitors if should_check_monitor(m, interval)]
    skipped = len(monitors) - len(due)

    if not due:
        return [], 0, skipped

    # Tiers up to 25s all run as independent tasks (3s/5s/10s/15s/20s/25s);
    # anything slower is batched (reserved for future tiers such as 30s).
    if interval <= 25:
        results, processed = process_independent_tier(due, interval, db)
    else:
        results, processed = process_batch_tier(due, config, db)

    return results, processed, skipped


def should_check_monitor(monitor: Monitor, interval: int) -> bool:
    """
    Decide whether a monitor is due for a check, preferring the Redis
    last_run_at / next_schedule_at markers over the DB timestamp.
    Note: *interval* is the actual (mapped) execution interval, which may
    differ from the check_interval stored in the database.
    """
    now = get_beijing_now()

    # A next_schedule_at still in the future means a self-scheduled run is
    # already pending — don't double-schedule.
    try:
        planned = _redis_get_dt(_next_schedule_key(monitor.id))
        if planned and planned > now and (planned - now).total_seconds() > 0.5:
            return False
    except Exception:
        pass

    # Prefer the Redis-recorded last execution time.
    try:
        last = _redis_get_dt(_last_run_key(monitor.id))
        if last:
            return (now - last).total_seconds() >= interval
    except Exception:
        pass

    # Fallback: DB field (display-oriented and possibly stale).
    if not monitor.last_check_at:
        return True
    return (now - monitor.last_check_at).total_seconds() >= interval


def process_batch_tier(monitors: List[Monitor], config: Dict, db: Session) -> tuple:
    """
    Process a tier's monitors in fixed-size batches (15s/10s/5s tiers).

    Args:
        monitors: monitors to process.
        config: tier configuration (batch_size, timeout).
        db: database session.

    Returns:
        tuple: (results, processed_count)
    """
    results = []
    processed = 0
    batch_size = config["batch_size"]
    timeout = config["timeout"]

    def _mark_batch_failed(batch, message):
        # Record the same error result for every monitor in the batch.
        for m in batch:
            results.append({
                "monitor_id": m.id,
                "status": "error",
                "error": message
            })

    for start in range(0, len(monitors), batch_size):
        batch = monitors[start:start + batch_size]

        try:
            # Bound each batch with a hard timeout via asyncio.wait_for.
            batch_results = asyncio.run(
                asyncio.wait_for(
                    process_monitor_batch(batch, db),
                    timeout=timeout
                )
            )
            results.extend(batch_results)

        except asyncio.TimeoutError:
            monitor_logger.warning(f"⏰ 批量处理超时 ({timeout}秒)")
            _mark_batch_failed(batch, f"批量处理超时 ({timeout}秒)")

        except Exception as e:
            monitor_logger.error(f"❌ 批量处理异常: {e}")
            _mark_batch_failed(batch, str(e))

        processed += len(batch)

    return results, processed


# process_ultra_tier 函数已废弃 - 已被 process_independent_tier 替代


def process_independent_tier(monitors: List[Monitor], interval: int, db: Session) -> tuple:
    """
    Independent-task dispatcher supporting every precision tier.

    Args:
        monitors: monitors to schedule.
        interval: actual execution interval in seconds.
        db: database session (kept for interface parity with the batch path).

    Returns:
        tuple: (results, processed_count)
    """
    results = []
    processed_count = 0

    for monitor in monitors:
        try:
            # Record the planned next run first, so should_check_monitor()
            # won't double-schedule while the task is in flight.
            # (Hoisted out of the branches below — both did this identically.)
            next_dt = get_beijing_now() + timedelta(seconds=interval)
            _redis_set_dt(_next_schedule_key(monitor.id), next_dt, ex=max(interval * 100, 86400))

            # Tiers of 5s and below (1s/3s/5s) use ultra_precision_monitor;
            # slower tiers use precision_monitor.
            if interval <= 5:
                task = ultra_precision_monitor.apply_async(
                    args=[monitor.id, interval],
                    countdown=0
                )
            else:
                task = precision_monitor.apply_async(
                    args=[monitor.id, interval],
                    countdown=0
                )

            results.append({
                "monitor_id": monitor.id,
                "status": "scheduled",
                "task_id": task.id,
                "interval": interval,
                "message": f"已调度为{interval}秒档独立任务"
            })
            processed_count += 1

        except Exception as e:
            monitor_logger.error(f"❌ 监控 {monitor.id} ({interval}秒档) 独立任务调度失败: {e}")
            results.append({
                "monitor_id": monitor.id,
                "status": "error",
                "error": str(e),
                "interval": interval
            })
            processed_count += 1

    return results, processed_count


async def process_monitor_batch(monitors: List[Monitor], db: Session) -> List[Dict]:
    """
    Run a batch of monitor checks sequentially.

    Args:
        monitors: monitors to check.
        db: database session.

    Returns:
        List[Dict]: one result dict per monitor (errors are captured
        per-monitor rather than aborting the batch).
    """
    outcomes: List[Dict] = []

    for m in monitors:
        try:
            outcome = await check_single_monitor_async(m, db)
        except Exception as e:
            monitor_logger.error(f"❌ 监控 {m.id} 处理异常: {e}")
            outcome = {
                "monitor_id": m.id,
                "status": "error",
                "error": str(e)
            }
        outcomes.append(outcome)

    return outcomes


# simple_check_single_monitor 任务已废弃 - 当前使用四档调度系统


@log_async_function_call("monitor")
async def check_single_monitor_async(monitor: Monitor, db: Session) -> Dict[str, Any]:
    """
    异步检查单个监控任务
    """
    monitor_start_time = get_beijing_now()

    try:
        # 解析监控条件
        try:
            conditions = json.loads(monitor.conditions) if isinstance(monitor.conditions, str) else monitor.conditions
        except Exception as parse_error:
            monitor_logger.error(f"❌ 监控 {monitor.id} 条件解析失败: {parse_error}")
            conditions = {}

        # 根据监控类型获取不同的参数
        if monitor.monitor_type == MonitorType.KEYWORD:
            keyword = conditions.get("keyword", "")
            if not keyword:
                monitor_logger.error(f"❌ 监控 {monitor.id} 缺少关键词")
                return {"status": "error", "reason": "no_keyword", "monitor_id": monitor.id}
        elif monitor.monitor_type == MonitorType.SELLER:
            seller_id = conditions.get("seller_id", "")
            seller_name = conditions.get("seller_name", "")
            
            # 如果没有seller_id，尝试从seller_url中提取
            if not seller_id:
                seller_url = conditions.get("seller_url", "")
                if seller_url:
                    try:
                        # 从URL中提取seller_id：https://jp.mercari.com/user/profile/940872164
                        # 提取profile/后面的部分
                        if "/profile/" in seller_url:
                            seller_id = seller_url.split("/profile/")[-1]
                            # 清理可能的查询参数或锚点
                            seller_id = seller_id.split("?")[0].split("#")[0]
                            # monitor_logger.info(f"🔗 从URL提取卖家ID: {seller_url} -> {seller_id}")
                        else:
                            monitor_logger.error(f"❌ 监控 {monitor.id} seller_url格式不正确: {seller_url}")
                            return {"status": "error", "reason": "invalid_seller_url", "monitor_id": monitor.id}
                    except Exception as url_error:
                        monitor_logger.error(f"❌ 监控 {monitor.id} 从URL提取卖家ID失败: {url_error}")
                        return {"status": "error", "reason": "seller_url_parse_error", "monitor_id": monitor.id}
                        
            # 最终检查是否有seller_id
            if not seller_id:
                monitor_logger.error(f"❌ 监控 {monitor.id} 缺少卖家ID或卖家URL")
                return {"status": "error", "reason": "no_seller_id_or_url", "monitor_id": monitor.id}
        elif monitor.monitor_type == MonitorType.SHOP:
            product_id = conditions.get("product_id", "")
            shop_name = conditions.get("shop_name", "")
            
            # 如果没有product_id，尝试从product_url中提取
            if not product_id:
                product_url = conditions.get("product_url", "")
                if product_url:
                    try:
                        # 从URL中提取product_id：https://jp.mercari.com/shops/product/jAqutzF5uNJrH2akpDNWwF
                        # 提取product/后面的部分
                        if "/product/" in product_url:
                            product_id = product_url.split("/product/")[-1]
                            # 清理可能的查询参数或锚点
                            product_id = product_id.split("?")[0].split("#")[0]
                            # monitor_logger.info(f"🔗 从URL提取商品ID: {product_url} -> {product_id}")
                        else:
                            monitor_logger.error(f"❌ 监控 {monitor.id} product_url格式不正确: {product_url}")
                            return {"status": "error", "reason": "invalid_product_url", "monitor_id": monitor.id}
                    except Exception as url_error:
                        monitor_logger.error(f"❌ 监控 {monitor.id} 从URL提取商品ID失败: {url_error}")
                        return {"status": "error", "reason": "url_parse_error", "monitor_id": monitor.id}
                        
            # 检查用户是否存在（仅在需要时查询）
            # 最终检查是否有product_id
            if not product_id:
                monitor_logger.error(f"❌ 监控 {monitor.id} 缺少商品ID或商品URL")
                return {"status": "error", "reason": "no_product_id_or_url", "monitor_id": monitor.id}
        else:
            monitor_logger.error(f"❌ 监控 {monitor.id} 不支持的监控类型: {monitor.monitor_type}")
            return {"status": "error", "reason": "unsupported_type", "monitor_id": monitor.id}

        # 根据监控类型调用不同的API
        api_success = False
        api_response_time = 0
        try:
            import time
            api_start_time = time.time()

            if monitor.monitor_type == MonitorType.KEYWORD:
                # monitor_logger.info(f"🔍 关键词API: '{keyword}', ID={monitor.id}")

                # 使用关键词搜索API（结果级缓存 + 进行中占位）
                rcw = get_redis_client()
                interval = getattr(monitor, "check_interval", 15) or 15
                # 根据实际执行间隔映射
                actual_interval = map_db_interval_to_actual_interval(interval)
                ttl_map = {1: 2, 3: 2, 5: 4, 10: 8, 15: 12}
                cache_ttl = ttl_map.get(interval, max(2, min(30, int(actual_interval))))

                # 档位自适应limit（关键词）
                kw_limit_map = {1: 2, 3: 2, 5: 3, 10: 3, 15: 4}
                adjusted_limit = kw_limit_map.get(interval, 10)

                sig_raw = f"{keyword}|SORT_CREATED_TIME|ORDER_DESC|{adjusted_limit}"
                sig = hashlib.sha1(sig_raw.encode("utf-8")).hexdigest()
                cache_key = f"mercari:search:{sig}"
                lock_key = f"{cache_key}:inflight"

                products = rcw.get(cache_key)
                if products is not None:
                    monitor_logger.info(f"♻️ 缓存命中(keyword): key={cache_key} ttl≈{cache_ttl}s items={len(products) if isinstance(products, list) else 'N/A'}")
                else:
                    acquired = False
                    try:
                        acquired = bool(rcw.client.set(lock_key, "1", nx=True, ex=10))
                    except Exception:
                        acquired = False

                    if not acquired:
                        for _ in range(10):
                            await asyncio.sleep(0.25)
                            products = rcw.get(cache_key)
                            if products is not None:
                                monitor_logger.info(f"⏳ 等待在途完成后命中缓存(keyword): key={cache_key} items={len(products) if isinstance(products, list) else 'N/A'}")
                                break

                    if products is None:
                        # 在真正打API前加一点抖动，避免同秒齐发（仅持锁者）
                        if acquired:
                            max_jitter = min(1.0, 0.2 * float(interval))
                            if max_jitter > 0:
                                await asyncio.sleep(random.uniform(0, max_jitter))

                        products = search_mercari_products(
                            keyword=keyword,
                            sort='SORT_CREATED_TIME',
                            order='ORDER_DESC',
                            limit=adjusted_limit
                        )
                        if products is not None:
                            ex = cache_ttl if (isinstance(products, list) and len(products) > 0) else max(5, cache_ttl // 2)
                            rcw.set(cache_key, products, ex=ex)
                            # monitor_logger.info(f"💾 已写入缓存(keyword): key={cache_key} ttl={ex}s items={len(products) if isinstance(products, list) else 'N/A'}")
                        if acquired:
                            try:
                                rcw.delete(lock_key)
                            except Exception:
                                pass

            elif monitor.monitor_type == MonitorType.SELLER:
                # monitor_logger.info(f"🔍 煤炉卖家API: 卖家ID='{seller_id}', 监控ID={monitor.id}")

                # 使用卖家商品API（结果级缓存 + 进行中占位）
                rcw = get_redis_client()
                interval = getattr(monitor, "check_interval", 15) or 15
                # 根据实际执行间隔映射
                actual_interval = map_db_interval_to_actual_interval(interval)
                ttl_map = {1: 2, 3: 2, 5: 4, 10: 8, 15: 12}
                cache_ttl = ttl_map.get(interval, max(2, min(30, int(actual_interval))))

                # 档位自适应limit（卖家）
                seller_limit_map = {1: 2, 3: 2, 5: 3, 10: 3, 15: 5}
                adjusted_limit = seller_limit_map.get(interval, 20)

                status = "on_sale"
                cache_key = f"mercari:seller:{seller_id}:{adjusted_limit}:{status}"
                lock_key = f"{cache_key}:inflight"

                products = rcw.get(cache_key)
                if products is not None:
                    monitor_logger.info(f"♻️ 缓存命中(seller): key={cache_key} ttl≈{cache_ttl}s items={len(products) if isinstance(products, list) else 'N/A'}")
                else:
                    acquired = False
                    try:
                        acquired = bool(rcw.client.set(lock_key, "1", nx=True, ex=10))
                    except Exception:
                        acquired = False

                    if not acquired:
                        for _ in range(10):
                            await asyncio.sleep(0.25)
                            products = rcw.get(cache_key)
                            if products is not None:
                                monitor_logger.info(f"⏳ 等待在途完成后命中缓存(seller): key={cache_key} items={len(products) if isinstance(products, list) else 'N/A'}")
                                break

                    if products is None:
                        # 在真正打API前加一点抖动，避免同秒齐发（仅持锁者）
                        if acquired:
                            max_jitter = min(1.0, 0.2 * float(interval))
                            if max_jitter > 0:
                                await asyncio.sleep(random.uniform(0, max_jitter))

                        products = get_mercari_seller_products(
                            seller_id=seller_id,
                            limit=adjusted_limit,
                            status=status
                        )
                        if products is not None:
                            ex = cache_ttl if (isinstance(products, list) and len(products) > 0) else max(5, cache_ttl // 2)
                            rcw.set(cache_key, products, ex=ex)
                            # monitor_logger.info(f"💾 已写入缓存(seller): key={cache_key} ttl={ex}s items={len(products) if isinstance(products, list) else 'N/A'}")
                        if acquired:
                            try:
                                rcw.delete(lock_key)
                            except Exception:
                                pass

            elif monitor.monitor_type == MonitorType.SHOP:
                # monitor_logger.info(f"🔍 煤炉店铺API: 商品ID='{product_id}', 店铺名='{shop_name}', 监控ID={monitor.id}")

                # 使用店铺商品API
                products = get_mercari_shop_products(
                    product_id=product_id,
                    limit=6  # 店铺商品可以获取更多
                )

            api_response_time = round(time.time() - api_start_time, 3)

            if products is None:
                # API返回None，表示调用失败
                monitor_logger.error(f"❌ 煤炉API调用失败: 返回None, 耗时{api_response_time}s")
                api_success = False
                products = []
            elif isinstance(products, list):
                # API调用成功，返回商品列表
                api_success = True
                # product_count = len(products)
                # if product_count > 0:

                #     monitor_logger.info(f"✅ 煤炉API调用成功: 获取到{product_count}个商品, 耗时{api_response_time}s")
                # else:
                #     monitor_logger.info(f"✅ 煤炉API调用成功: 无商品, 耗时{api_response_time}s")
            else:
                # 意外的返回类型
                monitor_logger.warning(f"⚠️ 煤炉API返回异常类型: {type(products)}, 耗时{api_response_time}s")
                api_success = False
                products = []

        except Exception as api_error:
            api_response_time = round(time.time() - api_start_time, 3) if 'api_start_time' in locals() else 0
            monitor_logger.error(f"❌ 煤炉API调用异常: {api_error}, 耗时{api_response_time}s")
            api_success = False
            # API异常时直接返回错误，不使用模拟数据
            return {
                "status": "error",
                "monitor_id": monitor.id,
                "error": f"Mercari API调用失败: {str(api_error)}",
                "reason": "api_failure",
                "api_success": False,
                "api_response_time": api_response_time
            }

        # 处理商品数据 - 使用智能新商品判断策略
        new_products = []

        for product in products:
            product_id = product.get("id")
            product_created_time = product.get("created_time")  # 获取商品创建时间

            # 使用智能判断：结合缓存检查和商品创建时间
            if is_truly_new_product(monitor.id, product_id, product_created_time):
                # 标记为已见过，传入商品创建时间用于更精确的缓存管理
                mark_product_seen_optimized(monitor.id, product_id, monitor.expires_at, product_created_time)
                new_products.append(product)
                monitor_logger.debug(f"🆕 发现新商品: {product_id}, 创建时间: {product_created_time}")
            else:
                monitor_logger.debug(f"⏭️ 跳过已知商品: {product_id}")

        # 发送通知
        notifications_sent = 0
        if new_products:
            try:
                # 查询用户信息（仅在需要发送通知时）
                user = db.query(User).filter(User.id == monitor.user_id).first()
                if not user:
                    monitor_logger.error(f"❌ 监控 {monitor.id} 用户不存在: user_id={monitor.user_id}")
                    return {"status": "error", "reason": "user_not_found", "monitor_id": monitor.id}
                
                # 根据监控类型准备通知参数
                if monitor.monitor_type == MonitorType.KEYWORD:
                    notification_subject = keyword
                    monitor_type_name = "关键词监控"
                elif monitor.monitor_type == MonitorType.SELLER:
                    notification_subject = f"{seller_name}({seller_id})"
                    monitor_type_name = "卖家监控"
                elif monitor.monitor_type == MonitorType.SHOP:
                    notification_subject = f"{shop_name}"
                    monitor_type_name = "店铺监控"
                else:
                    notification_subject = "未知监控"
                    monitor_type_name = "未知类型"

                # 使用统一的通知任务发送通知（支持所有渠道）
                monitor_logger.info(f"📢 准备发送统一通知: 用户{user.email}, {monitor_type_name}, 商品数{len(new_products)}")
                
                # 获取用户通知渠道和相关信息
                user_channel = getattr(user, 'channel', 'EMAIL').upper()
                user_openid = getattr(user, 'openid', None)
                user_tg = getattr(user, 'tg', None) or getattr(user, 'telegram_id', None)
                user_email = getattr(user, 'email', None)
                
                monitor_logger.info(f"📋 用户通知参数: 渠道={user_channel}, openid={user_openid}, tg={user_tg}, email={user_email}")
                
                # 调用统一的通知任务
                try:
                    task_result = send_notification_with_log.delay(
                        user_id=user.id,
                        monitor_id=monitor.id,
                        products=new_products,
                        notification_subject=notification_subject,
                        monitor_type=monitor.monitor_type.value,
                        channel=user_channel,
                        user_openid=user_openid,
                        user_tg=user_tg,
                        user_email=user_email
                    )
                    
                    # 等待任务完成获取结果
                    result = task_result.get(timeout=90)  # 90秒超时
                    
                    # 处理可能的None返回值（可能是任务正在重试中）
                    if result is None:
                        monitor_logger.info(f"🔄 统一通知任务可能正在处理中或重试: Task ID={task_result.id}")
                        # 不将None结果视为错误，而是认为通知可能正在处理中
                        notification_success = True
                        notify_log_id = None
                        error_msg = None
                    else:
                        notification_success = result.get('notification_success', False)
                        notify_log_id = result.get('notify_log_id')
                        error_msg = result.get('error')
                    
                    monitor_logger.info(f"✅ 统一通知任务完成: notify_log_id={notify_log_id}, 成功={notification_success}")
                    
                    # 如果是微信通知成功，记录小程序跳转URL
                    if user_channel == 'WECHAT' and notification_success and result:
                        miniprogram_url = result.get('miniprogram_url')
                        if miniprogram_url:
                            monitor_logger.info(f"🎯 微信小程序跳转URL: {miniprogram_url}")
                        else:
                            monitor_logger.info(f"📱 微信通知成功，但未获取到小程序跳转URL")
                    
                    # 统计通知发送成功数
                    if notification_success:
                        notifications_sent = len(new_products)
                        monitor_logger.info(f"📢 统一通知发送成功: {len(new_products)} 个商品通知")
                    elif error_msg:
                        monitor_logger.warning(f"⚠️ 统一通知发送失败: {error_msg}")
                        
                except Exception as task_error:
                    monitor_logger.error(f"❌ 统一通知任务异常: {task_error}")
                    notification_success = False

            except Exception as e:
                monitor_logger.error(f"⚠️ 发送通知失败: {e}")
                # 统一通知任务已经处理了所有渠道的日志记录，包括失败情况
                # 不需要额外的失败日志处理
        
        # 更新监控统计 - 只有在新商品时才更新数据库
        if new_products:
            monitor.total_found += len(new_products)
            monitor.total_notified += len(new_products)
            monitor.last_check_at = get_beijing_now()
            db.commit()
        # 如果没有新商品，不更新数据库以减少压力

        # 计算执行时间
        monitor_end_time = get_beijing_now()
        monitor_duration = (monitor_end_time - monitor_start_time).total_seconds()

        if new_products or len(products) > 0:
            monitor_logger.info(f"✅ 监控{monitor.id}: 总{len(products)}个, 新{len(new_products)}个, API{'成功' if api_success else '失败'}, {monitor_duration:.1f}s")

        # 准备返回结果的监控信息
        if monitor.monitor_type == MonitorType.KEYWORD:
            monitor_info = {"keyword": keyword}
        elif monitor.monitor_type == MonitorType.SELLER:
            monitor_info = {"seller_id": seller_id, "seller_name": seller_name}
        elif monitor.monitor_type == MonitorType.SHOP:
            monitor_info = {"product_id": product_id, "shop_name": shop_name}
        else:
            monitor_info = {"monitor_type": str(monitor.monitor_type)}

        return {
            "status": "success",
            "monitor_id": monitor.id,
            "monitor_type": monitor.monitor_type.value,
            **monitor_info,  # 展开监控特定信息
            "new_products_count": len(new_products),
            "total_products": len(products),
            "notifications_sent": notifications_sent,
            "duration_seconds": monitor_duration,
            "timestamp": get_beijing_now().isoformat(),
            "api_success": api_success,
            "api_response_time": api_response_time
        }

    except Exception as e:
        error_msg = str(e)
        monitor_logger.error(f"❌ 监控 {monitor.id} 检查异常: {error_msg}")
        return {
            "status": "error",
            "monitor_id": monitor.id,
            "error": error_msg
        }


# ❗️ 旧的复杂通知函数已被统一的 send_notification_with_log 任务替代
# 不再需要分别处理不同渠道的通知发送和日志记录


@celery_app.task(bind=True, name="precision_monitor")
def precision_monitor(self, monitor_id: int, interval: int):
    """
    Generic precision-monitoring standalone task.

    Acquires a per-monitor Redis lock to prevent concurrent runs, throttles
    via a Redis last-run timestamp, performs one monitoring check and, while
    the monitor stays active and unexpired, schedules its own next execution.

    Args:
        monitor_id: monitor task ID
        interval: check interval in seconds

    Returns:
        Dict: execution result
    """
    # Redis distributed lock to prevent duplicate concurrent execution.
    redis_wrapper = get_redis_client()
    redis_client = redis_wrapper.client
    lock_key = f"monitor_lock:{monitor_id}"
    lock_timeout = max(interval * 2, 30)  # 2x the interval, at least 30 seconds

    monitor_logger.info(f"🔄 监控任务 {monitor_id} 开始执行，间隔 {interval} 秒")
    # Try to acquire the lock; skip this run if another worker already holds it.
    lock_acquired = redis_client.set(lock_key, "locked", nx=True, ex=lock_timeout)
    if not lock_acquired:
        monitor_logger.debug(f"⏭️ 监控 {monitor_id} 已有任务在执行，跳过")
        return {
            "status": "skipped",
            "reason": "already_running",
            "monitor_id": monitor_id
        }

    try:
        db = SessionLocal()
        try:
            # Fetch the monitor; bail out if it no longer exists or is inactive.
            monitor = db.query(Monitor).filter(Monitor.id == monitor_id).first()
            if not monitor or monitor.status != MonitorStatus.ACTIVE:
                return {
                    "status": "skipped",
                    "reason": "monitor_inactive",
                    "monitor_id": monitor_id
                }

            # Stop rescheduling once the monitor has expired or been disabled.
            if not should_continue_monitoring(monitor):
                return {
                    "status": "expired",
                    "monitor_id": monitor_id
                }

            # Redis-based frequency control: skip when the last run was too
            # recent (1 second of tolerance is allowed).
            now = get_beijing_now()
            last_dt = _redis_get_dt(_last_run_key(monitor_id))
            if last_dt and (now - last_dt).total_seconds() < max(interval - 1, 1):
                monitor_logger.debug(f"⏭️ 监控 {monitor_id} 距离上次执行仅 {(now - last_dt).total_seconds():.1f}秒，跳过")
                return {
                    "status": "skipped",
                    "reason": "too_frequent",
                    "monitor_id": monitor_id,
                    "last_run_seconds_ago": (now - last_dt).total_seconds()
                }

            # Record this run's start time to guard against near-duplicate scheduling.
            _redis_set_dt(_last_run_key(monitor_id), now, ex=max(interval * 100, 86400))

            # Run the actual monitoring check.
            result = asyncio.run(check_single_monitor_async(monitor, db))

            # If the monitor is still active and not expired, schedule the next run.
            if (monitor.status == MonitorStatus.ACTIVE and
                monitor.is_active and
                should_continue_monitoring(monitor)):

                # Re-read check_interval from the DB and map it, so the next
                # run always uses the currently-configured interval.
                db_interval = getattr(monitor, 'check_interval', 15) or 15
                actual_interval = map_db_interval_to_actual_interval(db_interval)

                # Schedule the next execution and record its planned time.
                next_dt = now + timedelta(seconds=actual_interval)
                _redis_set_dt(_next_schedule_key(monitor_id), next_dt, ex=max(actual_interval * 100, 86400))
                precision_monitor.apply_async(
                    args=[monitor_id, actual_interval],
                    countdown=actual_interval
                )

            return result
        finally:
            # Always release the DB session, including on error/retry paths
            # (the original leaked the session whenever an exception fired).
            db.close()

    except Exception as e:
        error_msg = str(e)
        monitor_logger.error(f"❌ {interval}秒档监控 {monitor_id} 执行失败: {error_msg}")

        # Retry with a delay scaled to the interval. self.retry() raises
        # celery.exceptions.Retry, which MUST propagate for Celery to
        # reschedule the task — so only catch MaxRetriesExceededError here.
        # (The original broad `except Exception` swallowed Retry, which
        # silently disabled the retry mechanism.)
        try:
            retry_delay = min(interval, 10)  # cap retry delay at 10 seconds
            self.retry(countdown=retry_delay, max_retries=3)
        except self.MaxRetriesExceededError:
            return {
                "status": "error",
                "error": error_msg,
                "monitor_id": monitor_id
            }
    finally:
        # Best-effort release of the distributed lock.
        try:
            redis_client.delete(lock_key)
        except Exception:
            pass


@celery_app.task(bind=True, name="ultra_precision_monitor")
def ultra_precision_monitor(self, monitor_id: int, interval: int = 3) -> Dict[str, Any]:
    """
    Generic ultra-fast monitoring standalone task.

    Same scheme as precision_monitor but tuned for very short intervals:
    acquires a per-monitor Redis lock, throttles via a Redis last-run
    timestamp, performs one monitoring check and, while the monitor stays
    active and unexpired, schedules its own next execution.

    Args:
        monitor_id: monitor task ID
        interval: check interval in seconds, defaults to 3 seconds

    Returns:
        Dict: execution result
    """
    # Redis distributed lock to prevent duplicate concurrent execution.
    redis_wrapper = get_redis_client()
    redis_client = redis_wrapper.client
    lock_key = f"monitor_lock:{monitor_id}"
    lock_timeout = max(interval * 2, 30)  # 2x the interval, at least 30 seconds
    monitor_logger.info(f"🔄 监控任务 {monitor_id} 开始执行，间隔 {interval} 秒")
    # Try to acquire the lock; skip this run if another worker already holds it.
    lock_acquired = redis_client.set(lock_key, "locked", nx=True, ex=lock_timeout)
    if not lock_acquired:
        monitor_logger.debug(f"⏭️ 监控 {monitor_id} 已有任务在执行，跳过")
        return {
            "status": "skipped",
            "reason": "already_running",
            "monitor_id": monitor_id
        }

    try:
        db = SessionLocal()
        try:
            # Fetch the monitor; bail out if it no longer exists or is inactive.
            monitor = db.query(Monitor).filter(Monitor.id == monitor_id).first()
            if not monitor or monitor.status != MonitorStatus.ACTIVE:
                return {
                    "status": "skipped",
                    "reason": "monitor_inactive",
                    "monitor_id": monitor_id
                }

            # Stop rescheduling once the monitor has expired or been disabled.
            if not should_continue_monitoring(monitor):
                return {
                    "status": "expired",
                    "monitor_id": monitor_id
                }

            # Redis-based frequency control with 1 second of tolerance.
            now = get_beijing_now()
            last_dt = _redis_get_dt(_last_run_key(monitor_id))
            if last_dt and (now - last_dt).total_seconds() < max(interval - 1, 1):
                monitor_logger.debug(f"⏭️ 监控 {monitor_id} 距离上次执行仅 {(now - last_dt).total_seconds():.1f}秒，跳过")
                return {
                    "status": "skipped",
                    "reason": "too_frequent",
                    "monitor_id": monitor_id,
                    "last_run_seconds_ago": (now - last_dt).total_seconds()
                }

            # Record this run's start time to guard against near-duplicate scheduling.
            _redis_set_dt(_last_run_key(monitor_id), now, ex=max(interval * 100, 86400))

            # Run the actual monitoring check.
            result = asyncio.run(check_single_monitor_async(monitor, db))

            # If the monitor is still active and not expired, schedule the next run.
            if (monitor.status == MonitorStatus.ACTIVE and
                monitor.is_active and
                should_continue_monitoring(monitor)):

                # Re-read check_interval from the DB and map it, so the next
                # run always uses the currently-configured interval.
                db_interval = getattr(monitor, 'check_interval', 15) or 15
                actual_interval = map_db_interval_to_actual_interval(db_interval)

                # Schedule the next execution and record its planned time.
                next_dt = now + timedelta(seconds=actual_interval)
                _redis_set_dt(_next_schedule_key(monitor_id), next_dt, ex=max(actual_interval * 100, 86400))
                ultra_precision_monitor.apply_async(
                    args=[monitor_id, actual_interval],
                    countdown=actual_interval
                )

            return result
        finally:
            # Always release the DB session, including on error/retry paths
            # (the original leaked the session whenever an exception fired).
            db.close()

    except Exception as e:
        error_msg = str(e)
        monitor_logger.error(f"❌ {interval}秒档监控 {monitor_id} 执行失败: {error_msg}")

        # Retry with a delay scaled to the interval. self.retry() raises
        # celery.exceptions.Retry, which MUST propagate for Celery to
        # reschedule the task — so only catch MaxRetriesExceededError here.
        # (The original broad `except Exception` swallowed Retry, which
        # silently disabled the retry mechanism.)
        try:
            retry_delay = min(interval, 10)  # cap retry delay at 10 seconds
            self.retry(countdown=retry_delay, max_retries=3)
        except self.MaxRetriesExceededError:
            # Retries exhausted — report the error.
            return {
                "status": "error",
                "monitor_id": monitor_id,
                "error": error_msg,
                "retries_exhausted": True
            }
    finally:
        # Best-effort release of the distributed lock.
        try:
            redis_client.delete(lock_key)
        except Exception:
            pass


def should_continue_monitoring(monitor: Monitor) -> bool:
    """
    Decide whether a monitor should keep running.

    A monitor continues only while it is ACTIVE, enabled, and (if it has an
    expiry timestamp) not yet past its expiry.

    Args:
        monitor: the monitor object

    Returns:
        bool: True if monitoring should continue
    """
    # Guard clause: status must be ACTIVE and the enabled flag set.
    if monitor.status != MonitorStatus.ACTIVE or not monitor.is_active:
        return False

    # Continue unless an expiry is set and has already passed.
    # (Expiry check is only evaluated for active monitors.)
    expiry = monitor.expires_at
    return not (expiry and expiry <= get_beijing_now())