import json
import time
import sys
import os
from typing import List, Dict, Any, Optional
from datetime import datetime
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed

# Add the project root directory to sys.path so `src.*` imports resolve
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from src.common.unified_logging import get_logger
from .database import ShareDatabaseManager
from .models import CloudResource, CloudType, ShareStatus, CrawlResult
from .share_utils import get_tianyi_share_data, extract_share_data

class CloudResourceCrawler:
    """Crawler that fetches file listings for shared cloud-drive resources.

    Pulls resource rows from a ShareDatabaseManager, fetches each share's
    file tree via the share-scraping helpers, and writes the resulting
    JSON (or an error record plus a status) back to the database.
    Currently only the Tianyi (189.cn) cloud type is supported.
    """
    
    def __init__(self, db_manager: ShareDatabaseManager, max_workers: int = 3, delay_between_requests: float = 1.0):
        """Create a crawler.

        Args:
            db_manager: persistence layer for resources and crawl results.
            max_workers: thread-pool size used by threaded batch crawling.
            delay_between_requests: base delay (seconds) between requests,
                used to avoid hammering the remote service.
        """
        self.db = db_manager
        self.max_workers = max_workers
        self.delay_between_requests = delay_between_requests
        self.logger = get_logger(__name__)
        # Cooperative cancellation flag shared by batch loops and auto-crawl.
        self._stop_event = threading.Event()
        
    def crawl_single_resource(self, resource: CloudResource, depth: int = 3) -> bool:
        """Crawl one shared resource's file listing and persist the result.

        Args:
            resource: resource row to crawl (carries share code / access code).
            depth: maximum directory depth to traverse within the share.

        Returns:
            True when the share was fetched and the DB updated successfully;
            False on unsupported cloud type, fetch failure, or DB error.
            Never raises: all exceptions are caught and recorded.
        """
        try:
            self.logger.info(f"开始爬取资源: {resource.clouds_type} - {resource.share_code}")
            
            # Only the Tianyi cloud drive is supported for now.
            if resource.clouds_type != CloudType.TIANYI.value:
                self.logger.warning(f"暂不支持的云盘类型: {resource.clouds_type}")
                return False
            
            # Fetch the share's file tree via the Tianyi helper.
            result = get_tianyi_share_data(
                share_code=resource.share_code,
                access_code=resource.access_code,
                depth=depth
            )
            
            if result.get('success'):
                # Refresh the share name from the fetched info (falls back to
                # the stored name) and mark the row as processed.
                share_info = result.get('shareInfo', {})
                share_name = share_info.get('shareName', resource.share_name)
                share_status = ShareStatus.PROCESSED.value
                
                # Persist the full crawl result as pretty-printed JSON.
                file_info_json = json.dumps(result, ensure_ascii=False, indent=2)
                
                # Write the file info, name and status back to the database.
                success = self.db.update_file_info(
                    resource_id=resource.id,
                    file_info_json=file_info_json,
                    share_name=share_name,
                    share_status=share_status
                )
                
                if success:
                    self.logger.info(f"资源爬取成功: {resource.share_code}")
                    return True
                else:
                    self.logger.error(f"更新数据库失败: {resource.share_code}")
                    return False
            else:
                # Fetch failed: classify the failure and record it.
                error_msg = result.get('message', result.get('error', '未知错误'))
                self.logger.warning(f"资源爬取失败: {resource.share_code}, 错误: {error_msg}")
                
                # Map Chinese keywords in the error message to a status:
                # 访问码/密码 = wrong access code/password, 过期/失效 = expired,
                # 删除 = deleted; anything else = generically invalid.
                if '访问码' in error_msg or '密码' in error_msg:
                    share_status = ShareStatus.PASSWORD_ERROR.value
                elif '过期' in error_msg or '失效' in error_msg:
                    share_status = ShareStatus.EXPIRED.value
                elif '删除' in error_msg:
                    share_status = ShareStatus.DELETED.value
                else:
                    share_status = ShareStatus.INVALID.value
                
                # Store a small error record instead of the file tree.
                error_info = {
                    'success': False,
                    'error': error_msg,
                    'crawl_time': datetime.now().isoformat()
                }
                
                self.db.update_file_info(
                    resource_id=resource.id,
                    file_info_json=json.dumps(error_info, ensure_ascii=False),
                    share_status=share_status
                )
                
                return False
                
        except Exception as e:
            error_msg = f"爬取资源时发生异常: {str(e)}"
            self.logger.error(error_msg)
            
            # Record the exception against the resource so it is retriable
            # later via retry_failed_resources().
            error_info = {
                'success': False,
                'error': error_msg,
                'crawl_time': datetime.now().isoformat()
            }
            
            try:
                self.db.update_file_info(
                    resource_id=resource.id,
                    file_info_json=json.dumps(error_info, ensure_ascii=False),
                    share_status=ShareStatus.INVALID.value
                )
            except Exception as db_error:
                # Best effort only: losing the error record must not crash the batch.
                self.logger.error(f"更新数据库异常信息失败: {str(db_error)}")
            
            return False
    
    def crawl_batch(self, resources: List[CloudResource], depth: int = 3, 
                   use_threading: bool = True) -> CrawlResult:
        """Crawl a list of resources, optionally in parallel.

        Args:
            resources: resources to crawl; an empty list returns an empty result.
            depth: directory depth passed through to crawl_single_resource.
            use_threading: when True and more than one resource is given, use
                a ThreadPoolExecutor of self.max_workers; otherwise crawl
                sequentially with self.delay_between_requests between items.

        Returns:
            A CrawlResult aggregating successes and error messages.
        """
        result = CrawlResult()
        
        if not resources:
            return result
        
        self.logger.info(f"开始批量爬取 {len(resources)} 个资源")
        
        if use_threading and len(resources) > 1:
            # Threaded path: submit everything up front, collect as completed.
            with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
                # Map each future back to its resource for reporting.
                future_to_resource = {
                    executor.submit(self._crawl_with_delay, resource, depth): resource 
                    for resource in resources
                }
                
                # Collect results; NOTE(review): on stop we break out, so
                # already-submitted futures still run to completion inside
                # the executor's shutdown, they just go unreported here.
                for future in as_completed(future_to_resource):
                    if self._stop_event.is_set():
                        break
                        
                    resource = future_to_resource[future]
                    try:
                        success = future.result()
                        if success:
                            result.add_success(resource)
                        else:
                            result.add_error(f"爬取失败: {resource.share_code}")
                    except Exception as e:
                        error_msg = f"爬取资源 {resource.share_code} 时发生异常: {str(e)}"
                        result.add_error(error_msg)
        else:
            # Sequential path: crawl one by one with a fixed delay between items.
            for resource in resources:
                if self._stop_event.is_set():
                    break
                    
                try:
                    success = self.crawl_single_resource(resource, depth)
                    if success:
                        result.add_success(resource)
                    else:
                        result.add_error(f"爬取失败: {resource.share_code}")
                    
                    # Rate-limit between consecutive requests.
                    if self.delay_between_requests > 0:
                        time.sleep(self.delay_between_requests)
                        
                except Exception as e:
                    error_msg = f"爬取资源 {resource.share_code} 时发生异常: {str(e)}"
                    result.add_error(error_msg)
        
        self.logger.info(f"批量爬取完成: {result.get_summary()}")
        return result
    
    def _crawl_with_delay(self, resource: CloudResource, depth: int) -> bool:
        """Worker wrapper for threaded crawling: sleep, then crawl.

        Adds a small random jitter (0-0.5s) on top of the base delay so the
        pool's workers do not all hit the remote service at the same instant.
        """
        import random
        delay = self.delay_between_requests + random.uniform(0, 0.5)
        time.sleep(delay)
        
        return self.crawl_single_resource(resource, depth)
    
    def crawl_pending_resources(self, clouds_type: Optional[str] = None, limit: int = 100, 
                              depth: int = 3, use_threading: bool = True) -> CrawlResult:
        """Fetch up to `limit` unprocessed resources from the DB and crawl them.

        Args:
            clouds_type: restrict to one cloud type, or None for all.
            limit: maximum number of pending rows to fetch.
            depth: directory depth for each crawl.
            use_threading: passed through to crawl_batch.

        Returns:
            The batch's CrawlResult; an empty result when nothing is pending,
            or a result carrying a single error message on DB failure.
        """
        try:
            # Ask the DB layer for rows that have not been crawled yet.
            pending_resources_data = self.db.get_pending_resources(clouds_type, limit)
            
            if not pending_resources_data:
                self.logger.info("没有待处理的资源")
                return CrawlResult()
            
            # Hydrate raw rows into CloudResource objects.
            pending_resources = []
            for resource_data in pending_resources_data:
                resource = CloudResource.from_dict(resource_data)
                pending_resources.append(resource)
            
            self.logger.info(f"找到 {len(pending_resources)} 个待处理资源")
            
            # Delegate the actual crawling to crawl_batch.
            return self.crawl_batch(pending_resources, depth, use_threading)
            
        except Exception as e:
            result = CrawlResult()
            error_msg = f"获取待处理资源时发生错误: {str(e)}"
            result.add_error(error_msg)
            self.logger.error(error_msg)
            return result
    
    def crawl_by_share_codes(self, share_codes: List[str], clouds_type: str = CloudType.TIANYI.value,
                           depth: int = 3, use_threading: bool = True) -> CrawlResult:
        """Crawl the resources matching the given share codes.

        Share codes with no matching DB row are logged and skipped.

        Args:
            share_codes: share codes to look up and crawl.
            clouds_type: cloud type used for the lookup (defaults to Tianyi).
            depth: directory depth for each crawl.
            use_threading: passed through to crawl_batch.

        Returns:
            The batch's CrawlResult, or a result with one error when none of
            the share codes resolved to a stored resource.
        """
        try:
            resources = []
            for share_code in share_codes:
                # Look the share code up in the database.
                resource_data = self.db.get_resource(clouds_type, share_code)
                if resource_data:
                    resource = CloudResource.from_dict(resource_data)
                    resources.append(resource)
                else:
                    self.logger.warning(f"未找到分享码对应的资源: {share_code}")
            
            if resources:
                return self.crawl_batch(resources, depth, use_threading)
            else:
                result = CrawlResult()
                result.add_error("没有找到有效的资源")
                return result
                
        except Exception as e:
            result = CrawlResult()
            error_msg = f"根据分享码爬取资源时发生错误: {str(e)}"
            result.add_error(error_msg)
            self.logger.error(error_msg)
            return result
    
    def start_auto_crawl(self, interval_seconds: int = 300, batch_size: int = 10, 
                        depth: int = 3, clouds_type: Optional[str] = None):
        """Run a blocking loop that crawls pending resources periodically.

        Blocks the calling thread until stop_auto_crawl() is called (run it
        in a dedicated thread, as CrawlerScheduler does).

        Args:
            interval_seconds: pause between successful iterations.
            batch_size: how many pending resources to process per iteration.
            depth: directory depth for each crawl.
            clouds_type: restrict to one cloud type, or None for all.
        """
        self.logger.info(f"启动自动爬取任务，间隔: {interval_seconds}秒，批次大小: {batch_size}")
        
        while not self._stop_event.is_set():
            try:
                # Process one batch of pending resources.
                result = self.crawl_pending_resources(
                    clouds_type=clouds_type,
                    limit=batch_size,
                    depth=depth,
                    use_threading=True
                )
                
                if result.total_count > 0:
                    self.logger.info(f"自动爬取完成: {result.get_summary()}")
                else:
                    self.logger.info("没有待处理的资源，等待下次检查")
                
                # Sleep until the next cycle; wait() returns early on stop.
                self._stop_event.wait(interval_seconds)
                
            except Exception as e:
                self.logger.error(f"自动爬取任务发生错误: {str(e)}")
                # On error, retry after a shorter back-off.
                self._stop_event.wait(60)
    
    def stop_auto_crawl(self):
        """Signal the auto-crawl loop (and in-flight batches) to stop."""
        self.logger.info("停止自动爬取任务")
        self._stop_event.set()
    
    def get_crawl_statistics(self) -> Dict[str, Any]:
        """Compute aggregate crawl statistics from the cloud_resources table.

        Reaches into the manager's raw connection; assumes a DB-API
        connection whose cursor is a context manager (the %s placeholders
        elsewhere in this file suggest a MySQL-style driver — TODO confirm).

        Returns:
            A dict with 'total', 'processed', 'unprocessed', 'by_status',
            'by_type' and 'processing_rate' (percentage, 2 decimals), or an
            empty dict on any database error.
        """
        try:
            with self.db.connection.cursor() as cursor:
                # Total number of resources.
                cursor.execute("SELECT COUNT(*) as total FROM cloud_resources")
                total = cursor.fetchone()[0]
                
                # Rows with a non-empty crawl payload count as processed.
                cursor.execute("""
                    SELECT COUNT(*) as processed 
                    FROM cloud_resources 
                    WHERE file_info_json IS NOT NULL AND file_info_json != ''
                """)
                processed = cursor.fetchone()[0]
                
                # Rows never crawled (NULL or empty payload).
                cursor.execute("""
                    SELECT COUNT(*) as unprocessed 
                    FROM cloud_resources 
                    WHERE file_info_json IS NULL OR file_info_json = ''
                """)
                unprocessed = cursor.fetchone()[0]
                
                # Breakdown by share status.
                cursor.execute("""
                    SELECT share_status, COUNT(*) as count 
                    FROM cloud_resources 
                    GROUP BY share_status
                """)
                by_status = {row[0]: row[1] for row in cursor.fetchall()}
                
                # Processed/unprocessed breakdown per cloud type.
                cursor.execute("""
                    SELECT 
                        clouds_type,
                        SUM(CASE WHEN file_info_json IS NOT NULL AND file_info_json != '' THEN 1 ELSE 0 END) as processed,
                        SUM(CASE WHEN file_info_json IS NULL OR file_info_json = '' THEN 1 ELSE 0 END) as unprocessed
                    FROM cloud_resources 
                    GROUP BY clouds_type
                """)
                by_type = {}
                for row in cursor.fetchall():
                    by_type[row[0]] = {
                        'processed': row[1],
                        'unprocessed': row[2],
                        'total': row[1] + row[2]
                    }
                
                return {
                    'total': total,
                    'processed': processed,
                    'unprocessed': unprocessed,
                    'by_status': by_status,
                    'by_type': by_type,
                    'processing_rate': round((processed / total * 100) if total > 0 else 0, 2)
                }
                
        except Exception as e:
            self.logger.error(f"获取爬取统计信息失败: {str(e)}")
            return {}
    
    def retry_failed_resources(self, limit: int = 50, depth: int = 3) -> CrawlResult:
        """Re-crawl resources whose previous attempt failed.

        Selects rows whose status is 'invalid'/'password_error' or whose
        stored payload records a failed crawl, oldest-updated first, and
        re-runs them through crawl_batch with threading enabled.

        Args:
            limit: maximum number of failed rows to retry.
            depth: directory depth for each crawl.

        Returns:
            The batch's CrawlResult; empty when nothing qualifies, or a
            result carrying one error message on database failure.
        """
        try:
            # Find failed rows: by status, or by a failure marker inside the
            # stored JSON payload (the LIKE matches the error records written
            # by crawl_single_resource).
            with self.db.connection.cursor() as cursor:
                cursor.execute("""
                    SELECT id, clouds_type, share_code, access_code, share_name, full_url, 
                           share_status, share_time, created_at, updated_at
                    FROM cloud_resources 
                    WHERE share_status IN ('invalid', 'password_error') 
                       OR (file_info_json LIKE '%"success": false%')
                    ORDER BY updated_at ASC
                    LIMIT %s
                """, (limit,))
                
                failed_resources_data = cursor.fetchall()
            
            if not failed_resources_data:
                self.logger.info("没有需要重试的失败资源")
                return CrawlResult()
            
            # Rows come back as positional tuples; rebuild dicts in the
            # column order of the SELECT above before hydrating.
            failed_resources = []
            for resource_data in failed_resources_data:
                resource_dict = {
                    'id': resource_data[0],
                    'clouds_type': resource_data[1],
                    'share_code': resource_data[2],
                    'access_code': resource_data[3],
                    'share_name': resource_data[4],
                    'full_url': resource_data[5],
                    'share_status': resource_data[6],
                    'share_time': resource_data[7],
                    'created_at': resource_data[8],
                    'updated_at': resource_data[9]
                }
                resource = CloudResource.from_dict(resource_dict)
                failed_resources.append(resource)
            
            self.logger.info(f"找到 {len(failed_resources)} 个失败资源，开始重试")
            
            # Re-run the failures as a threaded batch.
            return self.crawl_batch(failed_resources, depth, use_threading=True)
            
        except Exception as e:
            result = CrawlResult()
            error_msg = f"重试失败资源时发生错误: {str(e)}"
            result.add_error(error_msg)
            self.logger.error(error_msg)
            return result


class CrawlerScheduler:
    """Runs a CloudResourceCrawler's auto-crawl loop in a background daemon thread.

    Thin lifecycle wrapper: start/stop the crawler's blocking
    start_auto_crawl() without blocking the caller.
    """
    
    def __init__(self, crawler: "CloudResourceCrawler"):
        """Bind the scheduler to a crawler instance.

        Args:
            crawler: the crawler whose start_auto_crawl/stop_auto_crawl
                this scheduler drives.
        """
        self.crawler = crawler
        # Bug fix: the module never imports `logging`, so the original
        # `logging.getLogger(__name__)` raised NameError. Use the project's
        # unified logger factory, consistent with CloudResourceCrawler.
        self.logger = get_logger(__name__)
        self._scheduler_thread = None
        self._is_running = False
    
    def start_scheduled_crawl(self, interval_minutes: int = 30, batch_size: int = 20):
        """Start the periodic crawl in a daemon thread (no-op if already running).

        Args:
            interval_minutes: minutes between crawl iterations.
            batch_size: pending resources processed per iteration.
        """
        if self._is_running:
            self.logger.warning("定时爬取任务已在运行")
            return
        
        self._is_running = True
        # Daemon thread so a crashing/exiting process is not kept alive.
        # Positional args match start_auto_crawl(interval_seconds, batch_size, depth, clouds_type).
        self._scheduler_thread = threading.Thread(
            target=self.crawler.start_auto_crawl,
            args=(interval_minutes * 60, batch_size, 3, None),
            daemon=True
        )
        self._scheduler_thread.start()
        self.logger.info(f"定时爬取任务已启动，间隔: {interval_minutes}分钟")
    
    def stop_scheduled_crawl(self):
        """Signal the crawler to stop and join the worker thread (5s timeout)."""
        if not self._is_running:
            return
        
        self.crawler.stop_auto_crawl()
        if self._scheduler_thread:
            # Bounded join: the loop may be mid-batch; don't hang forever.
            self._scheduler_thread.join(timeout=5)
        
        self._is_running = False
        self.logger.info("定时爬取任务已停止")
    
    def is_running(self) -> bool:
        """Return True while the scheduled crawl task is considered active."""
        return self._is_running


if __name__ == "__main__":
    # Ad-hoc smoke test for the crawler (requires a reachable database).
    # `logging` is imported locally: the module body does not use it.
    import logging
    logging.basicConfig(level=logging.INFO)
    
    from .database import init_database
    
    # Bug fix: the original called the undefined `init_global_session()`;
    # `init_database` is the factory actually imported above.
    # NOTE(review): assumes init_database() returns a ShareDatabaseManager
    # (or None on failure) — confirm against src/.../database.py.
    db = init_database()
    if db:
        crawler = CloudResourceCrawler(db, max_workers=2, delay_between_requests=1.0)
        
        # Crawl a small batch of pending resources.
        result = crawler.crawl_pending_resources(limit=5)
        print(f"爬取结果: {result.get_summary()}")
        
        # Print aggregate crawl statistics.
        stats = crawler.get_crawl_statistics()
        print(f"爬取统计: {stats}")
        
        db.disconnect()