import json
import logging
import requests
from celery import shared_task
from django.core.cache import caches
from django.utils import timezone
from .models import CrawlerConfig

# Module-level logger; 'myproject' must match a logger configured in LOGGING settings.
logger = logging.getLogger('myproject')

# Redis-backed cache configured under the 'cache_status' alias.
# NOTE(review): cache_status is not referenced anywhere in this file —
# presumably used by other modules importing it from here; confirm before removing.
cache_status = caches['cache_status']

@shared_task
def start_crawler_task(task_id, user_id, proxy_info=None):
    """Kick off a crawler run by POSTing its config to the crawler microservice.

    Loads the ``CrawlerConfig`` row for *task_id*, deserializes its JSON-encoded
    list fields, and sends the assembled parameters to the crawler service.
    All failures are logged and swallowed so the Celery task always finishes
    (no retry policy is configured here).

    Args:
        task_id: Key used to look up the ``CrawlerConfig`` record; forwarded
            to the crawler service as-is.
        user_id: Id of the requesting user; forwarded as-is.
        proxy_info: Optional proxy settings dict/str forwarded to the crawler
            service (``None`` means no proxy).
    """
    try:
        crawler_config = CrawlerConfig.objects.get(task_id=task_id)

        # id_list / creator_list are stored as JSON strings; treat empty/None
        # as an empty list so the downstream service always receives a list.
        id_list = json.loads(crawler_config.id_list) if crawler_config.id_list else []
        creator_list = json.loads(crawler_config.creator_list) if crawler_config.creator_list else []

        # Assemble the payload for the crawler microservice.
        params = {
            'task_id': task_id,
            'user_id': user_id,
            'platform_name': crawler_config.platform_name,
            'crawler_type': crawler_config.crawler_type,
            'keywords': crawler_config.keyword,
            'id_list': id_list,
            'creator_list': creator_list,
            'is_crawler_comment': crawler_config.is_crawler_comment,
            'is_crawler_sub_comment': crawler_config.is_crawler_sub_comment,
            'proxy_info': proxy_info,
        }

        # Call the crawler microservice to start the crawl.
        # TODO(review): move this URL into Django settings instead of hard-coding it.
        crawler_service_url = 'http://127.0.0.1:8088/start_crawler'
        response = requests.post(crawler_service_url, json=params, timeout=10)
        response.raise_for_status()
        logger.info("Crawler 已启动，task_id: %s", task_id)

    except CrawlerConfig.DoesNotExist:
        logger.error("CrawlerConfig 不存在，task_id: %s", task_id)
    except json.JSONDecodeError as e:
        logger.error("JSON解析错误，task_id: %s, 错误: %s", task_id, e)
    except requests.exceptions.RequestException as e:
        # Expected failure mode (service down, timeout, HTTP error status).
        logger.error("启动爬虫任务出错，task_id: %s, 错误: %s", task_id, e)
    except Exception as e:
        # Unexpected bug: log with full traceback so it can be diagnosed,
        # instead of a bare error line identical to the RequestException case.
        logger.exception("启动爬虫任务出错，task_id: %s, 错误: %s", task_id, e)
