import random
from celery import shared_task
from link.models import Link
from linkutil.get_url import my_get_link
from logs.models import LinkLog, now_Data
import asyncio
import redis
from django.conf import settings
import json
import time
from celery import current_app

import logging
logger = logging.getLogger('link')  # use the logger name configured in your project

@shared_task
def run_link_task(task_id):
    """Run one link-resolution cycle for the scheduler task ``task_id``.

    Flow:
        1. Load the task entry from the Redis hash ``scheduler_tasks``; bail
           out if it is missing or its status is not ``running``.
        2. Load the matching ``Link`` row; bail out if it is missing, its
           switch is off, or it has already failed more than 5 times.
        3. Resolve the final URL with the async helper ``my_get_link``.
        4. Write run / next-run timestamps back to Redis and enqueue the next
           execution via Celery ``send_task``.
        5. Apply the result to the ``Link`` row (success resets ``fail_num``;
           more than 5 consecutive failures disables the link and marks the
           task stopped) and persist a ``LinkLog`` audit record.

    Args:
        task_id: Scheduler task id; equal to ``Link.LinkId``.

    Returns:
        The resolved final URL string on success, otherwise ``None``.
    """
    try:
        # Connection to the scheduler state store.
        redis_client = redis.Redis(
            host=settings.REDIS_HOST,
            port=settings.REDIS_PORT,
            db=settings.REDIS_DB
        )

        # Fetch the task record; nothing to do if it no longer exists.
        task_info = redis_client.hget("scheduler_tasks", str(task_id))
        if not task_info:
            return

        task_info = json.loads(task_info)
        if task_info['status'] != 'running':
            return

        # Captured up front so the exception handler below can reuse it for
        # schedule bookkeeping even if the failure happens mid-run.
        current_time = time.time()

        # Load the Link row that drives this task.
        link = Link.objects.filter(LinkId=task_id).first()

        if not link or not link.Swith:
            return

        # Circuit breaker: too many consecutive failures, skip until reset.
        if link.fail_num > 5:
            return

        linkData = link.to_dict()

        try:
            startTime = now_Data()
            logger.info("开始执行my_get_link, OfferLink: %s", linkData.get('OfferLink'))
            logger.info("开始执行my_get_link, referrer: %s", linkData.get('referrer'))

            # my_get_link is a coroutine; drive it to completion here.
            final_url, log_des = asyncio.run(
                my_get_link(
                    linkData.get("OfferLink"),
                    linkData.get("TargetNation"),
                    linkData.get("redirect"),
                    linkData.get("URLldentifiers"),
                    referrer=linkData.get("referrer")
                )
            )
            logger.info("执行my_get_link完成, final_url: %s", final_url)
            logger.debug("log_des: %s", log_des)
            endTime = now_Data()

            # Record this run and compute the next one. ``delay`` is in
            # minutes; 1-60s of jitter keeps tasks from firing in lockstep.
            task_info['last_run'] = time.time()
            next_run = time.time() + (task_info['delay'] * 60)
            task_info['next_run'] = next_run + random.randint(1, 60)

            redis_client.hset(
                "scheduler_tasks",
                str(task_id),
                json.dumps(task_info)
            )

            # Enqueue the next execution.
            current_app.send_task(
                'processutil.tasks.run_link_task',
                args=[task_id],
                countdown=task_info['delay'] * 60  # delay in seconds
            )

            # Apply the resolution result to the Link row.
            if log_des.get("flag", False):
                link.finalUrl = final_url if final_url else "{lpurl}"
                link.finalUpdateTime = endTime
                link.fail_num = 0
                logger.info("获取链接成功，重置fail_num")
            else:
                link.fail_num += 1
                logger.info("获取链接失败，fail_num增加到: %s", link.fail_num)
                if link.fail_num > 5:
                    # Disable the link and stop the scheduler entry. The run
                    # already queued above will see status != 'running' and
                    # return immediately.
                    link.Swith = False
                    logger.warning("fail_num超过5次，关闭Link开关")
                    task_info['status'] = 'stopped'
                    redis_client.hset(
                        "scheduler_tasks",
                        str(task_id),
                        json.dumps(task_info)
                    )
            logger.info("link.finalUrl: %s", link.finalUrl)
            link.save()
            logger.info("Link数据保存成功")

            # Persist an audit record for this run.
            # NOTE(review): direct indexing assumes my_get_link always returns
            # linkInfo/urls/proxyInfo keys — a KeyError here is caught below.
            LinkLog.objects.create(
                LinkID=int(task_id),
                LogOptionDetail=f"""
                运行LinkId: {task_id}
                详细过程: {log_des}""",
                LinkInfo=log_des["linkInfo"],
                LinkURls=log_des["urls"],
                ProxyInfo=log_des["proxyInfo"],
                FinalUrl=log_des.get("final_url", ""),
                # Default unified to False (was ""); both are falsy, so the
                # chosen LogResult is unchanged.
                LogResult=LinkLog.LogResultChoices.SUCCESS.name if log_des.get("flag", False) else LinkLog.LogResultChoices.FAIL.name,
                LogStartTime=startTime,
                LogEndTime=endTime
            )
            logger.info("LinkLog记录创建成功")
            return final_url
        except Exception as e:
            logger.error("执行任务异常: %s", e, exc_info=True)
            # On failure still advance the schedule bookkeeping, but do not
            # enqueue an extra run here (one may already be queued above).
            task_info['last_run'] = current_time
            next_run = current_time + (task_info['delay'] * 60)
            task_info['next_run'] = next_run + random.randint(1, 60)
            redis_client.hset(
                "scheduler_tasks",
                str(task_id),
                json.dumps(task_info)
            )
    except Exception as e:
        logger.error("run_link_task执行出现严重错误: %s", e, exc_info=True)


# Schedule the next execution
def schedule_next_run(task_id, delay_minutes, redis_client=None):
    """Queue the next ``run_link_task`` execution for ``task_id``.

    A per-task Redis key acts as a de-duplication lock so the same task is
    never queued twice. The key expires 10 seconds before the queued run
    fires, so the lock never outlives the run it guards.

    Args:
        task_id: Scheduler task id (``Link.LinkId``).
        delay_minutes: Minutes to wait before the next run.
        redis_client: Optional existing Redis connection; a new one is built
            from Django settings when omitted.
    """
    if redis_client is None:
        redis_client = redis.Redis(
            host=settings.REDIS_HOST,
            port=settings.REDIS_PORT,
            db=settings.REDIS_DB
        )
    task_key = f"scheduler_tasks_{task_id}"
    # Skip if a run for this task is already scheduled or in flight.
    if redis_client.exists(task_key):
        logger.info("Task %s already scheduled or running, skipping", task_id)
        return
    # Set the lock; expire slightly BEFORE the queued run fires. Clamp to a
    # minimum of 1 second: setex rejects non-positive TTLs, which the old
    # code hit whenever delay_minutes * 60 <= 10.
    redis_client.setex(
        task_key,
        max(delay_minutes * 60 - 10, 1),
        "1"
    )
    # Queue the next execution.
    current_app.send_task(
        'processutil.tasks.run_link_task',
        args=[task_id],
        countdown=delay_minutes * 60  # delay in seconds
    )
    logger.info("Scheduled next run for task %s in %s minutes", task_id, delay_minutes)