"""
爬虫任务定义
为每个平台的爬虫创建Celery任务
"""
import os
import sys
import time
import uuid
import logging
from datetime import datetime
from typing import Dict, Any, Optional
from pathlib import Path

# Add the crawler service to the import path.
# Uses an environment variable, falling back to a repo-relative path.
crawler_path = os.getenv('CRAWLER_SERVICE_PATH', 
                         os.path.abspath(os.path.join(
                             os.path.dirname(__file__), 
                             '../../../../crawler/src'
                         )))
sys.path.insert(0, crawler_path)

from celery import Task, states
from celery.exceptions import SoftTimeLimitExceeded, Retry
from src.celery_app import app
from src.utils.logging_config import TaskLoggerAdapter
import motor.motor_asyncio
import asyncio

logger = logging.getLogger(__name__)

class CrawlerTask(Task):
    """Base class for crawler tasks.

    Provides lifecycle hooks that log task progress and persist the final
    outcome (success or failure) to MongoDB.
    """

    # NOTE(review): autoretry_for=(Exception,) overlaps with the manual
    # self.retry(...) calls inside the task bodies; kept for backward
    # compatibility, but one of the two mechanisms is redundant.
    autoretry_for = (Exception,)
    max_retries = 3
    default_retry_delay = 60

    def __init__(self):
        super().__init__()
        # Placeholders kept for compatibility; connections are created
        # per-save in _async_save_result instead.
        self.mongo_client = None
        self.db = None

    def before_start(self, task_id, args, kwargs):
        """Hook run before the task body: set up per-task logging and timing."""
        self.task_logger = TaskLoggerAdapter(logger, {'task_id': task_id})
        self.task_logger.info(f"Starting crawler task: {self.name}")
        self.start_time = time.time()

    def on_success(self, retval, task_id, args, kwargs):
        """Hook run on success: log the duration and persist the result."""
        # getattr fallback: before_start may not have run (e.g. the task
        # failed during setup), in which case start_time would be unset.
        duration = time.time() - getattr(self, 'start_time', time.time())
        self._get_logger().info(f"Task completed successfully in {duration:.2f}s")
        self._save_task_result(task_id, 'success', retval, duration)

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """Hook run on terminal failure: log the error and persist it."""
        duration = time.time() - getattr(self, 'start_time', time.time())
        self._get_logger().error(f"Task failed after {duration:.2f}s: {exc}")
        self._save_task_result(task_id, 'failed', str(exc), duration)

    def on_retry(self, exc, task_id, args, kwargs, einfo):
        """Hook run when the task is scheduled for a retry."""
        self._get_logger().warning(f"Task retrying due to: {exc}")

    def _get_logger(self):
        """Return the per-task logger, falling back to the module logger
        when before_start never ran."""
        return getattr(self, 'task_logger', logger)

    def _save_task_result(self, task_id, status, result, duration):
        """Persist the task outcome to MongoDB.

        A storage failure is logged but never propagated, so it cannot
        break Celery's result-handling machinery.
        """
        try:
            asyncio.run(self._async_save_result(task_id, status, result, duration))
        except Exception:
            logger.exception("Failed to save task result for %s", task_id)

    async def _async_save_result(self, task_id, status, result, duration):
        """Insert one result document into crawler_scheduler.task_results."""
        mongo_uri = os.getenv('MONGODB_URI', 'mongodb://localhost:27017')
        client = motor.motor_asyncio.AsyncIOMotorClient(mongo_uri)
        try:
            db = client['crawler_scheduler']
            await db.task_results.insert_one({
                'task_id': task_id,
                'task_name': self.name,
                'status': status,
                'result': result,
                'duration': duration,
                'timestamp': datetime.utcnow()
            })
        finally:
            # Always release the connection, even if the insert fails.
            client.close()

@app.task(base=CrawlerTask, bind=True, name='src.tasks.crawler.crawl_weibo',
          soft_time_limit=300, time_limit=600)
def crawl_weibo(self, force=False):
    """
    Weibo hot-search crawler task.

    Args:
        force: Whether to force a full crawl (ignore incremental update).
               NOTE(review): currently unused by this task body.

    Returns:
        dict with task_id, platform, items_count, success flag and timestamp.

    Raises:
        Retry: re-raised by self.retry on soft-timeout or any crawl failure.
    """
    task_id = self.request.id or str(uuid.uuid4())

    try:
        # Imported lazily so a broken spider module only fails this task.
        from spiders.weibo_hot_spider import WeiboHotSpider

        spider = WeiboHotSpider()
        # asyncio.run creates, uses and reliably closes a fresh event loop.
        # The previous manual new_event_loop/close pattern leaked the loop
        # when spider.crawl() raised and left a stale closed loop installed
        # via set_event_loop.
        result = asyncio.run(spider.crawl())

        return {
            'task_id': task_id,
            'platform': 'weibo',
            # assumes spider.crawl() returns a dict with a 'count' key — TODO confirm
            'items_count': result.get('count', 0),
            'success': True,
            'timestamp': datetime.utcnow().isoformat()
        }

    except SoftTimeLimitExceeded:
        self.task_logger.error('Task exceeded soft time limit')
        raise self.retry(countdown=120)
    except Exception as e:
        self.task_logger.error(f'Crawler failed: {e}')
        raise self.retry(exc=e, countdown=60)

@app.task(base=CrawlerTask, bind=True, name='src.tasks.crawler.crawl_zhihu',
          soft_time_limit=300, time_limit=600)
def crawl_zhihu(self, force=False):
    """
    Zhihu hot-list crawler task.

    Args:
        force: Whether to force a full crawl (ignore incremental update).
               NOTE(review): currently unused by this task body.

    Returns:
        dict with task_id, platform, items_count, success flag and timestamp.

    Raises:
        Retry: re-raised by self.retry on soft-timeout or any crawl failure.
    """
    task_id = self.request.id or str(uuid.uuid4())

    try:
        # Imported lazily so a broken spider module only fails this task.
        from spiders.zhihu_hot_spider import ZhihuHotSpider

        spider = ZhihuHotSpider()
        # asyncio.run closes the loop even when crawl() raises, fixing the
        # loop leak of the previous manual new_event_loop/close pattern.
        result = asyncio.run(spider.crawl())

        return {
            'task_id': task_id,
            'platform': 'zhihu',
            # assumes spider.crawl() returns a dict with a 'count' key — TODO confirm
            'items_count': result.get('count', 0),
            'success': True,
            'timestamp': datetime.utcnow().isoformat()
        }

    except SoftTimeLimitExceeded:
        # Log before retrying, consistent with crawl_weibo.
        self.task_logger.error('Task exceeded soft time limit')
        raise self.retry(countdown=120)
    except Exception as e:
        self.task_logger.error(f'Crawler failed: {e}')
        raise self.retry(exc=e, countdown=60)

@app.task(base=CrawlerTask, bind=True, name='src.tasks.crawler.crawl_toutiao',
          soft_time_limit=300, time_limit=600)
def crawl_toutiao(self, force=False):
    """
    Toutiao trending crawler task.

    Args:
        force: Whether to force a full crawl (ignore incremental update).
               NOTE(review): currently unused by this task body.

    Returns:
        dict with task_id, platform, items_count, success flag and timestamp.

    Raises:
        Retry: re-raised by self.retry on soft-timeout or any crawl failure.
    """
    task_id = self.request.id or str(uuid.uuid4())

    try:
        # Imported lazily so a broken spider module only fails this task.
        from spiders.toutiao_hot_spider import ToutiaoHotSpider

        spider = ToutiaoHotSpider()
        # asyncio.run closes the loop even when crawl() raises, fixing the
        # loop leak of the previous manual new_event_loop/close pattern.
        result = asyncio.run(spider.crawl())

        return {
            'task_id': task_id,
            'platform': 'toutiao',
            # assumes spider.crawl() returns a dict with a 'count' key — TODO confirm
            'items_count': result.get('count', 0),
            'success': True,
            'timestamp': datetime.utcnow().isoformat()
        }

    except SoftTimeLimitExceeded:
        # Log before retrying, consistent with crawl_weibo.
        self.task_logger.error('Task exceeded soft time limit')
        raise self.retry(countdown=120)
    except Exception as e:
        self.task_logger.error(f'Crawler failed: {e}')
        raise self.retry(exc=e, countdown=60)

@app.task(base=CrawlerTask, bind=True, name='src.tasks.crawler.crawl_baidu',
          soft_time_limit=300, time_limit=600)
def crawl_baidu(self, force=False):
    """
    Baidu hot-search crawler task.

    Args:
        force: Whether to force a full crawl (ignore incremental update).
               NOTE(review): currently unused by this task body.

    Returns:
        dict with task_id, platform, items_count, success flag and timestamp.

    Raises:
        Retry: re-raised by self.retry on soft-timeout or any crawl failure.
    """
    task_id = self.request.id or str(uuid.uuid4())

    try:
        # Imported lazily so a broken spider module only fails this task.
        from spiders.baidu_hot_spider import BaiduHotSpider

        spider = BaiduHotSpider()
        # asyncio.run closes the loop even when crawl() raises, fixing the
        # loop leak of the previous manual new_event_loop/close pattern.
        result = asyncio.run(spider.crawl())

        return {
            'task_id': task_id,
            'platform': 'baidu',
            # assumes spider.crawl() returns a dict with a 'count' key — TODO confirm
            'items_count': result.get('count', 0),
            'success': True,
            'timestamp': datetime.utcnow().isoformat()
        }

    except SoftTimeLimitExceeded:
        # Log before retrying, consistent with crawl_weibo.
        self.task_logger.error('Task exceeded soft time limit')
        raise self.retry(countdown=120)
    except Exception as e:
        self.task_logger.error(f'Crawler failed: {e}')
        raise self.retry(exc=e, countdown=60)

@app.task(base=CrawlerTask, bind=True, name='src.tasks.crawler.crawl_xiaohongshu',
          soft_time_limit=400, time_limit=800)
def crawl_xiaohongshu(self, force=False):
    """
    Xiaohongshu trending crawler task.

    Args:
        force: Whether to force a full crawl (ignore incremental update).
               NOTE(review): currently unused by this task body.

    Returns:
        dict with task_id, platform, items_count, success flag and timestamp.

    Raises:
        Retry: re-raised by self.retry on soft-timeout or any crawl failure.
    """
    task_id = self.request.id or str(uuid.uuid4())

    try:
        # Imported lazily so a broken spider module only fails this task.
        from spiders.xiaohongshu_hot_spider import XiaohongshuHotSpider

        spider = XiaohongshuHotSpider()
        # asyncio.run closes the loop even when crawl() raises, fixing the
        # loop leak of the previous manual new_event_loop/close pattern.
        result = asyncio.run(spider.crawl())

        return {
            'task_id': task_id,
            'platform': 'xiaohongshu',
            # assumes spider.crawl() returns a dict with a 'count' key — TODO confirm
            'items_count': result.get('count', 0),
            'success': True,
            'timestamp': datetime.utcnow().isoformat()
        }

    except SoftTimeLimitExceeded:
        # Log before retrying, consistent with crawl_weibo.
        self.task_logger.error('Task exceeded soft time limit')
        raise self.retry(countdown=180)
    except Exception as e:
        self.task_logger.error(f'Crawler failed: {e}')
        raise self.retry(exc=e, countdown=90)

@app.task(base=CrawlerTask, bind=True, name='src.tasks.crawler.crawl_douyin',
          soft_time_limit=400, time_limit=800)
def crawl_douyin(self, force=False):
    """
    Douyin trending crawler task.

    Args:
        force: Whether to force a full crawl (ignore incremental update).
               NOTE(review): currently unused by this task body.

    Returns:
        dict with task_id, platform, items_count, success flag and timestamp.

    Raises:
        Retry: re-raised by self.retry on soft-timeout or any crawl failure.
    """
    task_id = self.request.id or str(uuid.uuid4())

    try:
        # Imported lazily so a broken spider module only fails this task.
        from spiders.douyin_hot_spider import DouyinHotSpider

        spider = DouyinHotSpider()
        # asyncio.run closes the loop even when crawl() raises, fixing the
        # loop leak of the previous manual new_event_loop/close pattern.
        result = asyncio.run(spider.crawl())

        return {
            'task_id': task_id,
            'platform': 'douyin',
            # assumes spider.crawl() returns a dict with a 'count' key — TODO confirm
            'items_count': result.get('count', 0),
            'success': True,
            'timestamp': datetime.utcnow().isoformat()
        }

    except SoftTimeLimitExceeded:
        # Log before retrying, consistent with crawl_weibo.
        self.task_logger.error('Task exceeded soft time limit')
        raise self.retry(countdown=180)
    except Exception as e:
        self.task_logger.error(f'Crawler failed: {e}')
        raise self.retry(exc=e, countdown=90)

@app.task(base=CrawlerTask, bind=True, name='src.tasks.crawler.crawl_twitter',
          soft_time_limit=500, time_limit=1000)
def crawl_twitter(self, force=False):
    """
    Twitter trending crawler task.

    Args:
        force: Whether to force a full crawl (ignore incremental update).
               NOTE(review): currently unused by this task body.

    Returns:
        dict with task_id, platform, items_count, success flag and timestamp.

    Raises:
        Retry: re-raised by self.retry on soft-timeout or any crawl failure.
    """
    task_id = self.request.id or str(uuid.uuid4())

    try:
        # Imported lazily so a broken spider module only fails this task.
        from spiders.twitter_hot_spider import TwitterHotSpider

        spider = TwitterHotSpider()
        # asyncio.run closes the loop even when crawl() raises, fixing the
        # loop leak of the previous manual new_event_loop/close pattern.
        result = asyncio.run(spider.crawl())

        return {
            'task_id': task_id,
            'platform': 'twitter',
            # assumes spider.crawl() returns a dict with a 'count' key — TODO confirm
            'items_count': result.get('count', 0),
            'success': True,
            'timestamp': datetime.utcnow().isoformat()
        }

    except SoftTimeLimitExceeded:
        # Log before retrying, consistent with crawl_weibo.
        self.task_logger.error('Task exceeded soft time limit')
        raise self.retry(countdown=300)
    except Exception as e:
        self.task_logger.error(f'Crawler failed: {e}')
        raise self.retry(exc=e, countdown=120)

@app.task(base=CrawlerTask, bind=True, name='src.tasks.crawler.crawl_reddit',
          soft_time_limit=500, time_limit=1000)
def crawl_reddit(self, force=False):
    """
    Reddit trending crawler task.

    Args:
        force: Whether to force a full crawl (ignore incremental update).
               NOTE(review): currently unused by this task body.

    Returns:
        dict with task_id, platform, items_count, success flag and timestamp.

    Raises:
        Retry: re-raised by self.retry on soft-timeout or any crawl failure.
    """
    task_id = self.request.id or str(uuid.uuid4())

    try:
        # Imported lazily so a broken spider module only fails this task.
        from spiders.reddit_hot_spider import RedditHotSpider

        spider = RedditHotSpider()
        # asyncio.run closes the loop even when crawl() raises, fixing the
        # loop leak of the previous manual new_event_loop/close pattern.
        result = asyncio.run(spider.crawl())

        return {
            'task_id': task_id,
            'platform': 'reddit',
            # assumes spider.crawl() returns a dict with a 'count' key — TODO confirm
            'items_count': result.get('count', 0),
            'success': True,
            'timestamp': datetime.utcnow().isoformat()
        }

    except SoftTimeLimitExceeded:
        # Log before retrying, consistent with crawl_weibo.
        self.task_logger.error('Task exceeded soft time limit')
        raise self.retry(countdown=300)
    except Exception as e:
        self.task_logger.error(f'Crawler failed: {e}')
        raise self.retry(exc=e, countdown=120)

@app.task(base=CrawlerTask, bind=True, name='src.tasks.crawler.crawl_youtube',
          soft_time_limit=600, time_limit=1200)
def crawl_youtube(self, force=False):
    """
    YouTube trending crawler task.

    Args:
        force: Whether to force a full crawl (ignore incremental update).
               NOTE(review): currently unused by this task body.

    Returns:
        dict with task_id, platform, items_count, success flag and timestamp.

    Raises:
        Retry: re-raised by self.retry on soft-timeout or any crawl failure.
    """
    task_id = self.request.id or str(uuid.uuid4())

    try:
        # Imported lazily so a broken spider module only fails this task.
        from spiders.youtube_hot_spider import YoutubeHotSpider

        spider = YoutubeHotSpider()
        # asyncio.run closes the loop even when crawl() raises, fixing the
        # loop leak of the previous manual new_event_loop/close pattern.
        result = asyncio.run(spider.crawl())

        return {
            'task_id': task_id,
            'platform': 'youtube',
            # assumes spider.crawl() returns a dict with a 'count' key — TODO confirm
            'items_count': result.get('count', 0),
            'success': True,
            'timestamp': datetime.utcnow().isoformat()
        }

    except SoftTimeLimitExceeded:
        # Log before retrying, consistent with crawl_weibo.
        self.task_logger.error('Task exceeded soft time limit')
        raise self.retry(countdown=600)
    except Exception as e:
        self.task_logger.error(f'Crawler failed: {e}')
        raise self.retry(exc=e, countdown=300)