#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
崩坏3图片爬虫启动脚本
"""

import os
import sys
import time
import logging
from pathlib import Path

# Make the script's own directory importable so the project-local modules
# below resolve regardless of the current working directory.
current_dir = Path(__file__).parent
sys.path.insert(0, str(current_dir))

# Project-local modules; fail fast with a readable message if any is missing.
try:
    from bh3_crawler import BH3ImageCrawler
    from config import STORAGE_SETTINGS
    from utils import create_directory_structure, setup_logging
except ImportError as e:
    print(f"导入模块失败: {e}")
    print("请确保所有必要的文件都在当前目录中")
    sys.exit(1)

def check_dependencies():
    """Check that all required third-party packages are importable.

    Prints the list of missing packages together with install hints.

    Returns:
        bool: True when every required package can be imported,
        False otherwise.
    """
    # Map each pip distribution name to its importable module name.
    # The previous `package.replace('-', '_')` heuristic could never find
    # `beautifulsoup4` (imports as `bs4`) or `pillow` (imports as `PIL`),
    # so those packages were always reported as missing even when installed.
    required_packages = {
        'selenium': 'selenium',
        'webdriver_manager': 'webdriver_manager',
        'beautifulsoup4': 'bs4',
        'requests': 'requests',
        'pillow': 'PIL',
        'tqdm': 'tqdm',
        'pandas': 'pandas',
    }

    missing_packages = []

    for package, module_name in required_packages.items():
        try:
            __import__(module_name)
        except ImportError:
            missing_packages.append(package)

    if missing_packages:
        print("缺少以下依赖包:")
        for package in missing_packages:
            print(f"  - {package}")
        print("\n请运行以下命令安装依赖:")
        print(f"pip install {' '.join(missing_packages)}")
        print("\n或者运行:")
        print("pip install -r requirements.txt")
        return False

    return True

def create_data_folders():
    """Build the on-disk folder layout for crawler output.

    Returns:
        dict | None: mapping of folder names to their paths on success,
        or None when the structure could not be created.
    """
    try:
        print("创建数据存储文件夹...")
        directories = create_directory_structure(STORAGE_SETTINGS['base_dir'])

        print("文件夹结构创建完成:")
        for name, path in directories.items():
            print(f"  {name}: {path}")

        return directories

    except Exception as e:
        # Best-effort: report the problem and signal failure to the caller.
        print(f"创建文件夹失败: {e}")
        return None

def print_banner():
    """Display the startup banner on stdout."""
    text = """
╔══════════════════════════════════════════════════════════════╗
║                    崩坏3官网图片爬虫                          ║
║                                                              ║
║  目标网站:                                                    ║
║    • https://bh3.mihoyo.com/main                            ║
║    • https://bh3.mihoyo.com/valkyries                       ║
║                                                              ║
║  功能特性:                                                    ║
║    • 自动处理JavaScript渲染页面                               ║
║    • 智能图片识别和下载                                       ║
║    • 支持懒加载图片抓取                                       ║
║    • 自动图片格式转换和大小调整                               ║
║    • 详细的爬取日志和元数据保存                               ║
║                                                              ║
╚══════════════════════════════════════════════════════════════╝
"""
    print(text)

def print_settings_info():
    """Print the active crawler configuration to stdout."""
    download_state = '启用' if STORAGE_SETTINGS.get('download_images', True) else '禁用'
    headless_state = '启用' if STORAGE_SETTINGS.get('headless', True) else '禁用'
    print("\n当前配置:")
    print(f"  存储目录: {STORAGE_SETTINGS['base_dir']}")
    print(f"  图片下载: {download_state}")
    print(f"  无头模式: {headless_state}")
    print()

def main():
    """Interactive entry point: verify the environment, confirm with the
    user, run the crawler, and report summary statistics.

    Returns:
        int: process exit code — 0 on success or user cancel,
        1 on any failure.
    """
    print_banner()
    
    # Verify third-party dependencies before doing anything else.
    print("检查依赖包...")
    if not check_dependencies():
        return 1
    print("✓ 所有依赖包已安装")
    
    # Create the output folder structure; abort if it cannot be built.
    dirs = create_data_folders()
    if not dirs:
        return 1
    print("✓ 数据文件夹创建完成")
    
    # Show the active configuration so the user can review it.
    print_settings_info()
    
    # Ask for confirmation. NOTE: an empty answer (plain Enter) is
    # treated as "yes" — apparently an intentional default.
    try:
        response = input("是否开始爬取? (y/n): ").lower().strip()
        if response not in ['y', 'yes', '是', '']:
            print("爬虫已取消")
            return 0
    except KeyboardInterrupt:
        print("\n爬虫已取消")
        return 0
    
    # Run the crawl and time it.
    print("\n开始爬取...")
    start_time = time.time()
    
    try:
        crawler = BH3ImageCrawler()
        crawler.run()
        
        end_time = time.time()
        duration = end_time - start_time
        
        print(f"\n爬取完成! 总耗时: {duration:.2f} 秒")
        print(f"结果保存在: {dirs['base']}")
        
        # Best-effort: read the crawl summary written by the crawler and
        # print basic statistics. Any failure here is reported but does
        # not affect the exit code.
        try:
            import json
            # Assumes the crawler writes crawl_summary.json under the
            # 'data' directory — TODO confirm against BH3ImageCrawler.
            summary_file = os.path.join(dirs['data'], 'crawl_summary.json')
            if os.path.exists(summary_file):
                with open(summary_file, 'r', encoding='utf-8') as f:
                    summary = json.load(f)
                    # The summary may be stored as a one-element list;
                    # unwrap it in that case.
                    if isinstance(summary, list) and summary:
                        summary = summary[0]
                    
                    print("\n爬取统计:")
                    print(f"  总图片数: {summary.get('total_images', 0)}")
                    print(f"  成功下载: {summary.get('successful_downloads', 0)}")
                    print(f"  下载失败: {summary.get('failed_downloads', 0)}")
        except Exception as e:
            print(f"读取统计信息失败: {e}")
        
        return 0
        
    except KeyboardInterrupt:
        print("\n用户中断爬虫运行")
        return 1
    except Exception as e:
        # Top-level boundary: report and log the full traceback.
        print(f"\n爬虫运行失败: {e}")
        logging.exception("详细错误信息:")
        return 1

if __name__ == '__main__':
    exit_code = main()
    
    # On success, wait for Enter before exiting — presumably so a console
    # window opened by double-click doesn't close immediately; confirm.
    if exit_code == 0:
        try:
            input("\n按回车键退出...")
        except KeyboardInterrupt:
            pass
    
    sys.exit(exit_code)