#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
爬虫管理器
整合通用爬虫、AI解析、浏览器管理等功能
提供统一的爬虫服务接口
"""

import os
import sys
import json
import logging
import time
import random
from datetime import datetime
from typing import Dict, List, Any, Optional, Union
from pathlib import Path

# 添加项目路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config import DB_CONFIG, LOG_CONFIG, BROWSER_CONFIG
from core.universal_spider import UniversalSpider
from core.ai_content_parser import AIContentParser
from core.database_manager import DatabaseManager
from browser_manager import BrowserManager

# Configure module-wide logging from the project's LOG_CONFIG
# (level name is resolved to the logging constant, e.g. "INFO" -> logging.INFO).
logging.basicConfig(
    level=getattr(logging, LOG_CONFIG["level"]),
    format=LOG_CONFIG["format"]
)
logger = logging.getLogger(__name__)


class SpiderTaskManager:
    """In-memory registry of spider tasks with simple lifecycle tracking.

    A task record moves through: pending -> running -> completed/failed.
    Aggregate counters are accumulated in ``self.stats``.
    """

    def __init__(self):
        # Ordered list of task records; ids are 1-based append positions.
        self.tasks = []
        self.task_history = []
        self.stats = {
            'total_tasks': 0,
            'completed_tasks': 0,
            'failed_tasks': 0,
            'total_articles': 0,
        }

    def add_task(self, task_config: Dict[str, Any]) -> int:
        """Register a new pending task and return its numeric id."""
        task_id = len(self.tasks) + 1
        self.tasks.append({
            'id': task_id,
            'config': task_config,
            'status': 'pending',
            'created_at': datetime.now(),
            'started_at': None,
            'completed_at': None,
            'result': None,
            'error': None,
        })
        self.stats['total_tasks'] += 1
        logger.info(f"添加爬虫任务 #{task_id}: {task_config.get('name', 'Unknown')}")
        return task_id

    def get_task(self, task_id: int) -> Optional[Dict]:
        """Return the task record with the given id, or None if absent."""
        return next((t for t in self.tasks if t['id'] == task_id), None)

    def update_task_status(self, task_id: int, status: str, **kwargs):
        """Set a task's status, stamp timestamps, and merge extra fields.

        Unknown task ids are ignored silently. Moving into 'completed' or
        'failed' bumps the matching aggregate counter exactly once.
        """
        task = self.get_task(task_id)
        if task is None:
            return
        task['status'] = status
        if status == 'running':
            task['started_at'] = datetime.now()
        elif status in ('completed', 'failed'):
            task['completed_at'] = datetime.now()
            bucket = 'completed_tasks' if status == 'completed' else 'failed_tasks'
            self.stats[bucket] += 1

        # Merge any caller-supplied fields (e.g. result=..., error=...).
        task.update(kwargs)

    def get_pending_tasks(self) -> List[Dict]:
        """Return every task still waiting to run."""
        return self.get_tasks_by_status('pending')

    def get_task_stats(self) -> Dict[str, Any]:
        """Return a snapshot copy of the aggregate counters."""
        return dict(self.stats)

    def get_statistics(self) -> Dict[str, Any]:
        """Alias of :meth:`get_task_stats` kept for API compatibility."""
        return self.get_task_stats()

    def get_all_tasks(self) -> List[Dict]:
        """Return a shallow copy of the full task list."""
        return list(self.tasks)

    def get_tasks_by_status(self, status: str) -> List[Dict]:
        """Return all tasks currently in the given status."""
        return [t for t in self.tasks if t['status'] == status]


class SpiderConfigManager:
    """Builds spider, AI, and browser configuration dictionaries."""

    def __init__(self):
        # Baseline crawl settings; callers may override any key via
        # get_spider_config(custom_config).
        self.default_config = {
            'tool_type': 'requests',  # one of: requests, playwright, selenium
            'use_ai': False,
            'ai_provider': 'mock',
            'content_type': 'news',
            'max_links': 50,
            'delay_range': [1, 3],
            'timeout': 30,
            'retry_count': 3,
            'save_images': False,
            'extract_links': True,
        }

        # AI provider endpoints/keys are read from the environment so that
        # credentials never live in source control.
        self.ai_config = {
            'providers': {
                'huawei': {
                    'api_url': os.getenv('HUAWEI_AI_URL', ''),
                    'api_key': os.getenv('HUAWEI_AI_KEY', ''),
                    'model': 'default',
                },
                'openai': {
                    'api_url': os.getenv('OPENAI_API_URL', 'https://api.openai.com/v1/chat/completions'),
                    'api_key': os.getenv('OPENAI_API_KEY', ''),
                    'model': 'gpt-3.5-turbo',
                },
                'dify': {
                    'api_url': os.getenv('DIFY_API_URL', ''),
                    'api_key': os.getenv('DIFY_API_KEY', ''),
                },
            },
            'max_text_length': 3000,
            'batch_delay': 1,
        }

    def get_spider_config(self, custom_config: Optional[Dict] = None) -> Dict[str, Any]:
        """Merge defaults with caller overrides and attach AI/database settings.

        NOTE: the returned dict shares ``self.ai_config`` by reference.
        """
        merged = {**self.default_config, **(custom_config or {})}
        merged['ai'] = self.ai_config
        merged['database'] = DB_CONFIG
        return merged

    def get_browser_config(self) -> Dict[str, Any]:
        """Return the project-wide browser configuration."""
        return BROWSER_CONFIG


class SpiderManager:
    """爬虫管理器主类"""
    
    def __init__(self):
        self.task_manager = SpiderTaskManager()
        self.config_manager = SpiderConfigManager()
        self.browser_manager = None
        self.current_spider = None
        
        # 初始化数据库管理器
        try:
            self.db_manager = DatabaseManager()
            logger.info("数据库管理器初始化成功")
        except Exception as e:
            logger.warning(f"数据库管理器初始化失败: {e}")
            self.db_manager = None
        
        # 初始化浏览器管理器
        try:
            self.browser_manager = BrowserManager()
            logger.info("浏览器管理器初始化成功")
        except Exception as e:
            logger.warning(f"浏览器管理器初始化失败: {e}")
        
        logger.info("爬虫管理器初始化完成")
    
    def initialize_database(self) -> Dict[str, Any]:
        """初始化数据库"""
        if not self.db_manager:
            return {'success': False, 'error': '数据库管理器未初始化'}
        
        try:
            success = self.db_manager.initialize_database()
            if success:
                # 插入示例数据
                self.db_manager.insert_sample_schedule_task()
                
                # 获取状态
                status = self.db_manager.get_database_status()
                
                return {
                    'success': True,
                    'message': '数据库初始化成功',
                    'status': status
                }
            else:
                return {'success': False, 'error': '数据库初始化失败'}
                
        except Exception as e:
            logger.error(f"初始化数据库失败: {e}")
            return {'success': False, 'error': str(e)}
    
    def get_database_status(self) -> Dict[str, Any]:
        """获取数据库状态"""
        if not self.db_manager:
            return {'success': False, 'error': '数据库管理器未初始化'}
        
        try:
            status = self.db_manager.get_database_status()
            return {'success': True, 'status': status}
        except Exception as e:
            logger.error(f"获取数据库状态失败: {e}")
            return {'success': False, 'error': str(e)}
    
    def create_database_table(self, table_name: str) -> Dict[str, Any]:
        """创建数据库表"""
        if not self.db_manager:
            return {'success': False, 'error': '数据库管理器未初始化'}
        
        try:
            if self.db_manager.check_table_exists(table_name):
                return {'success': True, 'message': f'表 {table_name} 已存在'}
            
            success = self.db_manager.create_table(table_name)
            if success:
                return {'success': True, 'message': f'表 {table_name} 创建成功'}
            else:
                return {'success': False, 'error': f'表 {table_name} 创建失败'}
                
        except Exception as e:
            logger.error(f"创建表 {table_name} 失败: {e}")
            return {'success': False, 'error': str(e)}
    
    def optimize_database(self) -> Dict[str, Any]:
        """优化数据库"""
        if not self.db_manager:
            return {'success': False, 'error': '数据库管理器未初始化'}
        
        try:
            success = self.db_manager.optimize_tables()
            if success:
                return {'success': True, 'message': '数据库优化完成'}
            else:
                return {'success': False, 'error': '数据库优化失败'}
                
        except Exception as e:
            logger.error(f"优化数据库失败: {e}")
            return {'success': False, 'error': str(e)}
    
    def create_spider_task(self, name: str, urls: List[str], 
                          config: Optional[Dict] = None) -> int:
        """创建爬虫任务"""
        task_config = {
            'name': name,
            'urls': urls,
            'spider_config': self.config_manager.get_spider_config(config),
            'created_by': 'spider_manager'
        }
        
        return self.task_manager.add_task(task_config)
    
    def run_task(self, task_id: int) -> Dict[str, Any]:
        """运行单个任务"""
        task = self.task_manager.get_task(task_id)
        if not task:
            return {'success': False, 'error': f'任务 #{task_id} 不存在'}
        
        if task['status'] != 'pending':
            return {'success': False, 'error': f'任务 #{task_id} 状态不正确: {task["status"]}'}
        
        logger.info(f"开始执行任务 #{task_id}: {task['config']['name']}")
        
        # 更新任务状态
        self.task_manager.update_task_status(task_id, 'running')
        
        try:
            # 创建爬虫实例
            spider_config = task['config']['spider_config']
            spider = UniversalSpider(spider_config)
            self.current_spider = spider
            
            # 执行爬虫任务
            urls = task['config']['urls']
            results = []
            
            for url in urls:
                try:
                    # 模拟任务执行
                    result = self._run_single_url(spider, url, spider_config)
                    results.append(result)
                    
                except Exception as e:
                    logger.error(f"处理URL失败 {url}: {e}")
                    results.append({'url': url, 'success': False, 'error': str(e)})
            
            # 统计结果
            success_count = sum(1 for r in results if r.get('success', False))
            total_articles = sum(r.get('article_count', 0) for r in results)
            
            # 更新任务状态
            task_result = {
                'urls_processed': len(urls),
                'urls_success': success_count,
                'total_articles': total_articles,
                'results': results
            }
            
            self.task_manager.update_task_status(
                task_id, 'completed', 
                result=task_result
            )
            self.task_manager.stats['total_articles'] += total_articles
            
            logger.info(f"任务 #{task_id} 执行完成，处理 {len(urls)} 个URL，成功 {success_count} 个，获取 {total_articles} 篇文章")
            
            return {'success': True, 'result': task_result}
            
        except Exception as e:
            logger.error(f"任务 #{task_id} 执行失败: {e}")
            self.task_manager.update_task_status(
                task_id, 'failed', 
                error=str(e)
            )
            return {'success': False, 'error': str(e)}
        finally:
            if self.current_spider:
                self.current_spider.close()
                self.current_spider = None
    
    def _run_single_url(self, spider: UniversalSpider, url: str, 
                       config: Dict[str, Any]) -> Dict[str, Any]:
        """运行单个URL的爬取"""
        try:
            logger.info(f"开始爬取URL: {url}")
            
            # 获取网站链接
            max_links = config.get('max_links', 50)
            links = spider.get_site_links(url, max_links)
            
            if not links:
                return {
                    'url': url,
                    'success': False,
                    'error': '未获取到任何链接',
                    'article_count': 0
                }
            
            # 提取文章内容
            articles = []
            use_ai = config.get('use_ai', False)
            
            for link in links:
                try:
                    article_data = spider.extract_article_content(
                        link, 
                        use_ai=use_ai,
                        custom_fields=config.get('extract_fields')
                    )
                    
                    if article_data and article_data.get('正文') and len(article_data['正文']) > 50:
                        # 添加URL信息
                        article_data['url'] = link
                        article_data['来源'] = url
                        
                        # 保存到数据库
                        if spider.save_article(article_data):
                            articles.append(article_data)
                    
                    # 添加延迟
                    delay_range = config.get('delay_range', [1, 3])
                    time.sleep(random.uniform(delay_range[0], delay_range[1]))
                    
                except Exception as e:
                    logger.error(f"处理文章失败 {link}: {e}")
                    continue
            
            return {
                'url': url,
                'success': True,
                'links_found': len(links),
                'article_count': len(articles),
                'articles': articles[:5]  # 只返回前5篇文章的预览
            }
            
        except Exception as e:
            logger.error(f"爬取URL失败 {url}: {e}")
            return {
                'url': url,
                'success': False,
                'error': str(e),
                'article_count': 0
            }
    
    def run_all_tasks(self) -> Dict[str, Any]:
        """运行所有待处理的任务"""
        pending_tasks = self.task_manager.get_pending_tasks()
        
        if not pending_tasks:
            logger.info("没有待处理的任务")
            return {'success': True, 'message': '没有待处理的任务'}
        
        logger.info(f"开始执行 {len(pending_tasks)} 个待处理任务")
        
        results = []
        for task in pending_tasks:
            result = self.run_task(task['id'])
            results.append({
                'task_id': task['id'],
                'task_name': task['config']['name'],
                'result': result
            })
        
        # 统计结果
        success_count = sum(1 for r in results if r['result']['success'])
        
        return {
            'success': True,
            'total_tasks': len(pending_tasks),
            'success_tasks': success_count,
            'failed_tasks': len(pending_tasks) - success_count,
            'results': results
        }
    
    def get_task_status(self, task_id: Optional[int] = None) -> Dict[str, Any]:
        """获取任务状态"""
        if task_id:
            task = self.task_manager.get_task(task_id)
            if task:
                return {
                    'task': task,
                    'stats': self.task_manager.get_task_stats()
                }
            else:
                return {'error': f'任务 #{task_id} 不存在'}
        else:
            return {
                'all_tasks': self.task_manager.tasks,
                'stats': self.task_manager.get_task_stats()
            }
    
    def test_ai_parser(self, text: str, content_type: str = 'news') -> Dict[str, Any]:
        """测试AI解析器"""
        try:
            ai_config = self.config_manager.ai_config
            parser = AIContentParser(ai_config)
            
            result = parser.parse_content(text, content_type)
            status = parser.get_provider_status()
            
            return {
                'success': True,
                'result': result,
                'provider_status': status
            }
            
        except Exception as e:
            logger.error(f"AI解析器测试失败: {e}")
            return {'success': False, 'error': str(e)}
    
    def test_browser_manager(self) -> Dict[str, Any]:
        """测试浏览器管理器"""
        if not self.browser_manager:
            return {'success': False, 'error': '浏览器管理器未初始化'}
        
        try:
            # 检查浏览器状态
            status = self.browser_manager.check_browser_status()
            
            # 如果浏览器不存在，尝试下载
            if not status['browser_exists']:
                logger.info("浏览器不存在，开始下载...")
                download_result = self.browser_manager.download_firefox()
                if not download_result:
                    return {'success': False, 'error': '浏览器下载失败'}
            
            # 测试浏览器启动
            test_result = self.browser_manager.test_firefox_launch()
            
            return {
                'success': True,
                'browser_status': status,
                'test_result': test_result
            }
            
        except Exception as e:
            logger.error(f"浏览器管理器测试失败: {e}")
            return {'success': False, 'error': str(e)}
    
    def get_system_status(self) -> Dict[str, Any]:
        """获取系统状态"""
        status = {
            'spider_manager': 'running',
            'task_stats': self.task_manager.get_task_stats(),
            'browser_manager': 'not_available',
            'ai_parser': 'not_tested',
            'database': 'not_available'
        }
        
        # 检查数据库管理器
        if self.db_manager:
            try:
                db_status = self.db_manager.get_database_status()
                if db_status['database_exists']:
                    status['database'] = f"available ({db_status['existing_tables']}/{db_status['total_tables']} tables)"
                else:
                    status['database'] = 'database_missing'
            except:
                status['database'] = 'error'
        
        # 检查浏览器管理器
        if self.browser_manager:
            try:
                browser_status = self.browser_manager.check_browser_status()
                status['browser_manager'] = 'available' if browser_status['browser_exists'] else 'browser_missing'
            except:
                status['browser_manager'] = 'error'
        
        # 检查AI解析器
        try:
            ai_config = self.config_manager.ai_config
            parser = AIContentParser(ai_config)
            provider_status = parser.get_provider_status()
            available_providers = [name for name, available in provider_status.items() if available]
            status['ai_parser'] = f"available ({', '.join(available_providers)})" if available_providers else 'no_providers'
        except:
            status['ai_parser'] = 'error'
        
        return status
    
    def cleanup(self):
        """清理资源"""
        if self.current_spider:
            self.current_spider.close()
        
        if self.browser_manager:
            # 浏览器管理器的清理由其自身处理
            pass
        
        logger.info("爬虫管理器资源已清理")

    def create_task(self, task_name: str, site_url: str, content_type: str = 'news', 
                   **kwargs) -> Dict[str, Any]:
        """创建爬虫任务"""
        try:
            task_id = f"task_{int(time.time() * 1000)}_{random.randint(1000, 9999)}"
            
            task_data = {
                'task_id': task_id,
                'task_name': task_name,
                'site_url': site_url,
                'content_type': content_type,
                'status': '1',  # 1=待执行
                'created_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                'max_links': kwargs.get('max_links', 10),
                'use_ai': kwargs.get('use_ai', False),
                'tool_type': kwargs.get('tool_type', 'requests')
            }
            
            # 添加到任务管理器
            self.task_manager.add_task(task_data)
            
            logger.info(f"创建任务成功: {task_name} ({task_id})")
            
            return {
                'success': True,
                'task_id': task_id,
                'message': f'任务 {task_name} 创建成功'
            }
            
        except Exception as e:
            logger.error(f"创建任务失败: {e}")
            return {
                'success': False,
                'error': str(e)
            }

    def get_task_statistics(self) -> Dict[str, Any]:
        """获取任务统计信息"""
        try:
            stats = self.task_manager.get_statistics()
            return stats
        except Exception as e:
            logger.error(f"获取任务统计失败: {e}")
            return {
                'total_tasks': 0,
                'completed_tasks': 0,
                'failed_tasks': 0,
                'total_articles': 0
            }

    def get_task_list(self) -> List[Dict]:
        """获取任务列表"""
        return self.task_manager.get_all_tasks()


# 使用示例
def main():
    """Demonstrate the spider manager: status check, one test task, summary."""
    manager = None
    try:
        manager = SpiderManager()

        # Report the health of every subsystem.
        status_report = manager.get_system_status()
        print("系统状态:")
        for component, status in status_report.items():
            print(f"  {component}: {status}")

        # Register a small demo crawl task.
        demo_config = {
            'tool_type': 'requests',
            'use_ai': False,
            'max_links': 10
        }
        task_id = manager.create_spider_task(
            name="测试新闻网站爬取",
            urls=["https://news.cctv.com/"],
            config=demo_config
        )
        print(f"\n创建测试任务 #{task_id}")

        # Execute it and show the outcome.
        outcome = manager.run_task(task_id)
        print(f"任务执行结果: {outcome}")

        overview = manager.get_task_status()
        print(f"\n任务统计: {overview['stats']}")

    except Exception as e:
        logger.error(f"示例运行失败: {e}")
    finally:
        # Only clean up if construction succeeded.
        if manager is not None:
            manager.cleanup()


# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()