"""
主程序入口

整合所有功能模块，提供完整的爬虫功能。
"""

import sys
import time
from pathlib import Path
from typing import List, Optional
import argparse

# 添加项目根目录到Python路径
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from src.config_loader import config_loader
from src.scraper.scraper_factory import ScraperFactory
from src.utils.logger import get_logger, scraping_logger
from src.utils.markdown_writer import markdown_writer
from src.models.data_model import ScrapingResult


class WebScraper:
    """Top-level orchestrator: load website configs, run scrapers, write reports."""

    def __init__(self):
        """Set up the general and per-site scraping loggers."""
        self.logger = get_logger('main')
        self.scraping_logger = scraping_logger

        self.logger.info("初始化网络爬虫系统")

    def run(self, config_file: Optional[str] = None, max_pages: Optional[int] = None) -> List[ScrapingResult]:
        """
        Run the scraper over every enabled website configuration.

        Args:
            config_file: Path to a custom config file. Not supported yet; a
                warning is logged and the argument is otherwise ignored.
            max_pages: Optional cap on pages scraped per site; when given it
                overrides each site's pagination config.

        Returns:
            One ScrapingResult per attempted website (empty list when no
            website is enabled).

        Raises:
            Exception: re-raised after logging when the run fails unexpectedly.
        """
        try:
            if config_file:
                # Custom config files are not implemented yet; warn instead of
                # silently dropping the argument as the old TODO did.
                self.logger.warning(f"暂不支持自定义配置文件，忽略: {config_file}")

            website_configs = config_loader.get_enabled_websites()

            if not website_configs:
                self.logger.warning("没有找到启用的网站配置")
                return []

            self.logger.info(f"开始爬取 {len(website_configs)} 个网站")

            results = self._scrape_websites(website_configs, max_pages)
            self._generate_reports(results)
            return results

        except Exception as e:
            self.logger.error(f"爬虫运行失败: {e}")
            raise

    def _scrape_websites(self, website_configs: List, max_pages: Optional[int] = None) -> List[ScrapingResult]:
        """
        Scrape each configured website, collecting one result per site.

        Args:
            website_configs: Website configuration objects to process.
            max_pages: Optional page cap applied to every site's pagination
                config before scraping.

        Returns:
            List of ScrapingResult; per-site failures are recorded as
            unsuccessful results rather than raised.
        """
        results = []

        for config in website_configs:
            try:
                # Skip (but do not abort on) invalid configurations.
                if not config_loader.validate_config(config):
                    self.logger.error(f"配置验证失败: {config.name}")
                    continue

                if max_pages is not None:
                    self._apply_page_limit(config, max_pages)

                scraper = ScraperFactory.create_scraper(config)

                # Context manager guarantees scraper resources are released
                # even when scrape() raises.
                with scraper:
                    result = scraper.scrape()
                    results.append(result)

                if result.success:
                    self.scraping_logger.finish_scraping(
                        result.url,
                        result.get_data_count(),
                        result.processing_time
                    )
                else:
                    self.scraping_logger.scraping_error(result.url, Exception(result.error_message))

            except Exception as e:
                self.logger.error(f"爬取网站失败 {config.name}: {e}")
                # Record the failure so reports still cover this site.
                results.append(ScrapingResult(
                    url=config.url,
                    success=False,
                    error_message=str(e)
                ))

        return results

    def _apply_page_limit(self, config, max_pages: int) -> None:
        """Enable pagination on *config* and cap it at *max_pages*, filling defaults.

        Mutates ``config.selectors`` in place; existing pagination settings
        other than ``enabled``/``max_pages`` are preserved.
        """
        # setdefault creates the intermediate dicts without clobbering any
        # values that are already present.
        pagination = config.selectors.setdefault('special_config', {}) \
                                     .setdefault('pagination', {})
        pagination['enabled'] = True
        pagination['max_pages'] = max_pages
        pagination.setdefault('page_delay', 3)
        pagination.setdefault('wait_for_load', 5)

        self.logger.info(f"设置最大爬取页数为: {max_pages}")

    def _generate_reports(self, results: List[ScrapingResult]) -> None:
        """
        Write the summary and detailed markdown reports.

        Args:
            results: Scraping results to report on. Report failures are
                logged but never raised (best-effort).
        """
        try:
            output_file = markdown_writer.write_results(results)
            if output_file:
                self.logger.info(f"数据报告已生成: {output_file}")

            detailed_file = markdown_writer.write_detailed_report(results)
            if detailed_file:
                self.logger.info(f"详细报告已生成: {detailed_file}")

        except Exception as e:
            self.logger.error(f"生成报告失败: {e}")

    def list_websites(self) -> None:
        """Print every configured website with its enabled state to stdout."""
        try:
            website_configs = config_loader.load_website_configs()

            print("\n配置的网站列表:")
            print("-" * 60)

            for i, config in enumerate(website_configs, 1):
                status = "✅ 启用" if config.enabled else "❌ 禁用"
                print(f"{i}. {config.name} ({status})")
                print(f"   URL: {config.url}")
                print(f"   爬虫类型: {config.scraper_type}")
                print(f"   超时: {config.timeout}s, 重试: {config.max_retries}次")
                print()

        except Exception as e:
            self.logger.error(f"列出网站失败: {e}")

    def test_config(self) -> None:
        """Validate every website configuration and print a pass/fail summary."""
        try:
            website_configs = config_loader.load_website_configs()

            print("\n配置测试结果:")
            print("-" * 60)

            for config in website_configs:
                is_valid = config_loader.validate_config(config)
                status = "✅ 有效" if is_valid else "❌ 无效"
                print(f"{config.name}: {status}")

                if not is_valid:
                    print(f"  错误: 配置验证失败")
                print()

        except Exception as e:
            self.logger.error(f"测试配置失败: {e}")


def main():
    """CLI entry point: parse arguments and dispatch to list/test/run."""
    parser = argparse.ArgumentParser(description='网络爬虫工具')
    parser.add_argument('--list', action='store_true', help='列出配置的网站')
    parser.add_argument('--test', action='store_true', help='测试配置')
    parser.add_argument('--config', type=str, help='指定配置文件路径')
    parser.add_argument('--pages', type=int, help='指定爬取页数（默认为配置文件中的设置）')

    args = parser.parse_args()

    # Reject a nonsensical page count before any scraping work starts.
    if args.pages is not None and args.pages < 1:
        parser.error('--pages 必须是正整数')

    scraper = WebScraper()

    try:
        if args.list:
            scraper.list_websites()
        elif args.test:
            scraper.test_config()
        else:
            start_time = time.time()
            results = scraper.run(args.config, args.pages)
            elapsed = time.time() - start_time

            # Summarize the run for the console.
            total_requests = len(results)
            successful_requests = sum(1 for r in results if r.success)
            # Use the model's own counter so this total matches the per-site
            # logs emitted by the scraping logger.
            total_data = sum(r.get_data_count() for r in results if r.success)

            print("\n爬取完成!")
            print(f"总耗时: {elapsed:.2f}秒")
            print(f"总请求数: {total_requests}")
            print(f"成功请求数: {successful_requests}")
            print(f"总数据条数: {total_data}")

    except KeyboardInterrupt:
        print("\n用户中断操作")
    except Exception as e:
        print(f"程序执行失败: {e}")
        sys.exit(1)


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
