#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
崩坏3官网图片爬虫
爬取 https://bh3.mihoyo.com/main 和 https://bh3.mihoyo.com/valkyries 的图片
"""

import os
import re
import time
import json
import logging
from typing import List, Dict, Set, Optional
from urllib.parse import urljoin, urlparse

from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, WebDriverException
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
from tqdm import tqdm

from config import TARGET_URLS, CRAWLER_SETTINGS, STORAGE_SETTINGS, IMAGE_SETTINGS, SELENIUM_SETTINGS
from utils import (
    setup_logging, create_directory_structure, is_valid_image_url,
    download_image, save_metadata, normalize_url, generate_filename_from_url,
    resize_image_if_needed, validate_image_file, extract_domain_from_url
)

class BH3ImageCrawler:
    """Image crawler for the Honkai Impact 3rd official site (bh3.mihoyo.com).

    Drives a Chrome instance through Selenium, scrolls pages to trigger
    lazy loading, extracts image URLs from both the rendered HTML
    (BeautifulSoup) and live DOM elements (Selenium), downloads the images,
    and persists per-page and summary metadata as JSON/CSV.
    """

    def __init__(self):
        # Logger and on-disk directory layout come from project settings.
        self.logger = setup_logging(STORAGE_SETTINGS['logs_dir'])
        self.dirs = create_directory_structure(STORAGE_SETTINGS['base_dir'])
        # Created lazily in run(); closed in run()'s finally clause.
        self.driver = None
        # Normalized URLs already collected — prevents duplicate downloads.
        self.crawled_urls: Set[str] = set()
        self.image_data: List[Dict] = []

    def setup_driver(self) -> webdriver.Chrome:
        """Create and configure a Chrome WebDriver instance.

        Returns:
            A ready-to-use ``webdriver.Chrome`` with timeouts applied.

        Raises:
            Exception: re-raised after logging if driver creation fails.
        """
        try:
            chrome_options = Options()

            # Basic settings.
            if CRAWLER_SETTINGS['headless']:
                chrome_options.add_argument('--headless')

            chrome_options.add_argument(f'--user-agent={CRAWLER_SETTINGS["user_agent"]}')
            chrome_options.add_argument(f'--window-size={CRAWLER_SETTINGS["window_size"][0]},{CRAWLER_SETTINGS["window_size"][1]}')

            # Stability/performance flags commonly needed in containers and CI.
            chrome_options.add_argument('--no-sandbox')
            chrome_options.add_argument('--disable-dev-shm-usage')
            chrome_options.add_argument('--disable-gpu')
            chrome_options.add_argument('--disable-web-security')
            chrome_options.add_argument('--disable-features=VizDisplayCompositor')

            # Optionally disable in-browser image rendering (URLs are still
            # present in the DOM, so extraction keeps working).
            if not IMAGE_SETTINGS.get('enable_images', True):
                prefs = {"profile.managed_default_content_settings.images": 2}
                chrome_options.add_experimental_option("prefs", prefs)

            # webdriver_manager downloads a matching ChromeDriver binary.
            service = Service(ChromeDriverManager().install())

            driver = webdriver.Chrome(service=service, options=chrome_options)

            # Apply timeouts from project settings.
            driver.implicitly_wait(SELENIUM_SETTINGS['implicit_wait'])
            driver.set_page_load_timeout(SELENIUM_SETTINGS['page_load_timeout'])
            driver.set_script_timeout(SELENIUM_SETTINGS['script_timeout'])

            self.logger.info("Chrome浏览器驱动设置成功")
            return driver

        except Exception as e:
            self.logger.error(f"设置浏览器驱动失败: {str(e)}")
            raise

    def wait_for_page_load(self, driver: webdriver.Chrome, timeout: int = 30) -> bool:
        """Wait until the document is fully loaded, then scroll the page.

        Args:
            driver: Active Chrome driver on the target page.
            timeout: Seconds to wait for ``document.readyState == "complete"``.

        Returns:
            True if the page finished loading, False on timeout.
        """
        try:
            # Wait for the browser to report the document as complete.
            WebDriverWait(driver, timeout).until(
                lambda d: d.execute_script("return document.readyState") == "complete"
            )

            # Grace period for JavaScript-driven rendering after readyState.
            time.sleep(3)

            # Scroll through the page to trigger lazy-loaded images.
            self.scroll_page(driver)

            return True

        except TimeoutException:
            self.logger.warning(f"页面加载超时: {driver.current_url}")
            return False

    def scroll_page(self, driver: webdriver.Chrome, scroll_pause_time: float = 2) -> None:
        """Scroll the page stepwise to the bottom to trigger lazy loading.

        Re-reads the document height after each step because lazy-loaded
        content can grow the page while scrolling. Returns to the top when
        done. Errors are logged, not raised.

        Args:
            driver: Active Chrome driver.
            scroll_pause_time: Seconds to pause between scroll steps.
        """
        try:
            # Current total document height in pixels.
            last_height = driver.execute_script("return document.body.scrollHeight")

            scroll_position = 0
            scroll_step = 800

            while scroll_position < last_height:
                # Jump to the next scroll offset.
                driver.execute_script(f"window.scrollTo(0, {scroll_position});")
                time.sleep(scroll_pause_time)

                # Lazy loading may have appended content; extend the target.
                new_height = driver.execute_script("return document.body.scrollHeight")
                if new_height > last_height:
                    last_height = new_height

                scroll_position += scroll_step

            # Return to the top so subsequent extraction starts from a known state.
            driver.execute_script("window.scrollTo(0, 0);")
            time.sleep(1)

            self.logger.info(f"页面滚动完成，最终高度: {last_height}px")

        except Exception as e:
            self.logger.error(f"滚动页面时出错: {str(e)}")

    def extract_images_from_page(self, driver: webdriver.Chrome, url: str) -> List[Dict]:
        """Extract image metadata records from the current page.

        Combines two passes: a BeautifulSoup scan of the page source and a
        live Selenium scan (for dynamically loaded images). Deduplicates via
        ``self.crawled_urls``.

        Args:
            driver: Driver currently showing the page.
            url: The page URL (used as base for relative image URLs).

        Returns:
            A list of dicts describing each newly found, valid image URL;
            empty list on error.
        """
        images = []

        try:
            # Pass 1: parse the rendered HTML source.
            page_source = driver.page_source
            soup = BeautifulSoup(page_source, 'html.parser')

            # Selectors covering the usual places image URLs hide.
            image_selectors = [
                'img',
                '[style*="background-image"]',
                '[data-src]',
                '[data-original]',
                '[data-lazy]',
                'picture source',
                'video poster'
            ]

            found_elements = []
            for selector in image_selectors:
                elements = soup.select(selector)
                found_elements.extend(elements)

            self.logger.info(f"在 {url} 找到 {len(found_elements)} 个可能的图片元素")

            # Collect normalized, validated, not-yet-seen URLs.
            for element in found_elements:
                image_urls = self.extract_urls_from_element(element, url)

                for img_url in image_urls:
                    if img_url and img_url not in self.crawled_urls:
                        if is_valid_image_url(img_url, CRAWLER_SETTINGS['supported_formats']):
                            images.append({
                                'url': img_url,
                                'source_page': url,
                                'alt_text': element.get('alt', ''),
                                'title': element.get('title', ''),
                                'element_tag': element.name,
                                'found_time': time.strftime('%Y-%m-%d %H:%M:%S')
                            })
                            self.crawled_urls.add(img_url)

            # Pass 2: query the live DOM for images added after page_source
            # was captured (dynamically loaded content).
            selenium_images = self.extract_images_with_selenium(driver, url)
            images.extend(selenium_images)

            self.logger.info(f"从 {url} 提取到 {len(images)} 个有效图片URL")
            return images

        except Exception as e:
            self.logger.error(f"提取图片时出错 {url}: {str(e)}")
            return []

    def extract_urls_from_element(self, element, base_url: str) -> List[str]:
        """Pull every candidate image URL out of a single HTML element.

        Handles: ``img`` src and lazy-load attributes, inline
        ``background-image`` styles, ``source`` srcset/src, and ``video``
        poster attributes.

        Args:
            element: A BeautifulSoup tag.
            base_url: Page URL used to resolve relative references.

        Returns:
            List of normalized (absolute) URL strings; may be empty.
        """
        urls = []

        # Standard <img> tag, including common lazy-loading attributes.
        if element.name == 'img':
            for attr in ['src', 'data-src', 'data-original', 'data-lazy']:
                if element.get(attr):
                    urls.append(normalize_url(element[attr], base_url))

        # Inline CSS background-image.
        style = element.get('style', '')
        if 'background-image' in style:
            bg_match = re.search(r'background-image:\s*url\(["\']?([^"\')]+)["\']?\)', style)
            if bg_match:
                urls.append(normalize_url(bg_match.group(1), base_url))

        # <source> inside <picture>; srcset may list several URL/descriptor pairs.
        if element.name == 'source':
            for attr in ['srcset', 'src']:
                if element.get(attr):
                    # Capture each URL, skipping width/density descriptors (e.g. "2x", "480w").
                    srcset_urls = re.findall(r'([^\s,]+)(?:\s+[\d.]+[wx])?', element[attr])
                    for srcset_url in srcset_urls:
                        urls.append(normalize_url(srcset_url, base_url))

        # <video poster="...">.
        if element.name == 'video' and element.get('poster'):
            urls.append(normalize_url(element['poster'], base_url))

        return urls

    def extract_images_with_selenium(self, driver: webdriver.Chrome, url: str) -> List[Dict]:
        """Find <img> elements via the live DOM (catches dynamic content).

        Args:
            driver: Driver currently showing the page.
            url: Page URL, used as base for relative src values.

        Returns:
            List of image-info dicts (includes width/height attributes);
            empty list on error.
        """
        images = []

        try:
            img_elements = driver.find_elements(By.TAG_NAME, 'img')

            for img_element in img_elements:
                try:
                    img_url = img_element.get_attribute('src')
                    if not img_url:
                        # Fall back to common lazy-loading attributes.
                        for attr in ['data-src', 'data-original', 'data-lazy']:
                            img_url = img_element.get_attribute(attr)
                            if img_url:
                                break

                    if img_url:
                        # BUGFIX: normalize BEFORE the dedup check — crawled_urls
                        # stores normalized URLs, so checking the raw value could
                        # let the same image through twice.
                        img_url = normalize_url(img_url, url)
                        if (img_url not in self.crawled_urls
                                and is_valid_image_url(img_url, CRAWLER_SETTINGS['supported_formats'])):
                            images.append({
                                'url': img_url,
                                'source_page': url,
                                'alt_text': img_element.get_attribute('alt') or '',
                                'title': img_element.get_attribute('title') or '',
                                'element_tag': 'img',
                                'found_time': time.strftime('%Y-%m-%d %H:%M:%S'),
                                'width': img_element.get_attribute('width'),
                                'height': img_element.get_attribute('height')
                            })
                            self.crawled_urls.add(img_url)

                except Exception as e:
                    # A stale/detached element shouldn't abort the whole scan.
                    self.logger.debug(f"处理img元素时出错: {str(e)}")
                    continue

            return images

        except Exception as e:
            self.logger.error(f"使用Selenium提取图片时出错: {str(e)}")
            return []

    def download_images(self, images: List[Dict], page_name: str) -> List[Dict]:
        """Download each image and annotate its record with the outcome.

        Each dict in *images* is mutated in place with ``download_status``
        ('success' / 'already_exists' / 'invalid_file' / 'failed' / 'error')
        plus ``local_path`` / ``download_error`` where applicable.

        Args:
            images: Image-info dicts produced by the extraction passes.
            page_name: Key into ``self.dirs`` selecting the target folder;
                falls back to the generic 'images' directory.

        Returns:
            The same list, with status fields updated.
        """
        if not IMAGE_SETTINGS['download_images']:
            self.logger.info("图片下载已禁用，跳过下载")
            return images

        download_dir = self.dirs[page_name] if page_name in self.dirs else self.dirs['images']
        successful_downloads = []

        headers = {
            'User-Agent': CRAWLER_SETTINGS['user_agent'],
            # Many CDNs require a same-site Referer to serve images.
            'Referer': images[0]['source_page'] if images else 'https://bh3.mihoyo.com/'
        }

        self.logger.info(f"开始下载 {len(images)} 张图片到 {download_dir}")

        for i, image_info in enumerate(tqdm(images, desc="下载图片")):
            try:
                # Derive a stable local filename from the URL.
                filename = generate_filename_from_url(image_info['url'], i)
                file_path = os.path.join(download_dir, filename)

                # Skip files that already exist and validate.
                if os.path.exists(file_path) and validate_image_file(file_path):
                    # BUGFIX: log messages used a literal "(unknown)" placeholder
                    # instead of the actual filename.
                    self.logger.debug(f"文件已存在，跳过: {filename}")
                    image_info['local_path'] = file_path
                    image_info['download_status'] = 'already_exists'
                    successful_downloads.append(image_info)
                    continue

                # Fetch the image.
                success, message = download_image(
                    image_info['url'],
                    file_path,
                    headers=headers,
                    max_size=IMAGE_SETTINGS['max_file_size'],
                    timeout=CRAWLER_SETTINGS['timeout']
                )

                if success:
                    # Reject files that are corrupt or below the minimum size.
                    if validate_image_file(file_path, CRAWLER_SETTINGS['image_min_size']):
                        # Downscale oversized images when configured.
                        if IMAGE_SETTINGS['resize_large_images']:
                            resize_image_if_needed(
                                file_path,
                                IMAGE_SETTINGS['max_width'],
                                IMAGE_SETTINGS['max_height']
                            )

                        image_info['local_path'] = file_path
                        image_info['download_status'] = 'success'
                        image_info['download_time'] = time.strftime('%Y-%m-%d %H:%M:%S')
                        successful_downloads.append(image_info)

                        self.logger.debug(f"下载成功: {filename}")
                    else:
                        # Remove the invalid file so a retry starts clean.
                        if os.path.exists(file_path):
                            os.remove(file_path)
                        image_info['download_status'] = 'invalid_file'
                        self.logger.warning(f"下载的文件无效: {filename}")
                else:
                    image_info['download_status'] = 'failed'
                    image_info['download_error'] = message
                    self.logger.warning(f"下载失败 {filename}: {message}")

                # Polite delay between requests.
                if CRAWLER_SETTINGS['delay_between_requests'] > 0:
                    time.sleep(CRAWLER_SETTINGS['delay_between_requests'])

            except Exception as e:
                self.logger.error(f"下载图片时出错 {image_info['url']}: {str(e)}")
                image_info['download_status'] = 'error'
                image_info['download_error'] = str(e)

        self.logger.info(f"成功下载 {len(successful_downloads)} 张图片")
        return images

    def crawl_page(self, url: str) -> List[Dict]:
        """Crawl one page end-to-end: load, extract, download, save metadata.

        Args:
            url: Page URL to crawl.

        Returns:
            Image-info dicts (with download statuses); empty list when no
            images were found or on error.
        """
        self.logger.info(f"开始爬取页面: {url}")

        try:
            self.driver.get(url)

            # Incomplete loads are logged but extraction is still attempted.
            if not self.wait_for_page_load(self.driver):
                self.logger.warning(f"页面加载可能不完整: {url}")

            images = self.extract_images_from_page(self.driver, url)

            if not images:
                self.logger.warning(f"未在页面中找到图片: {url}")
                return []

            # Bucket downloads by page name for folder organization.
            page_name = 'main' if 'main' in url else 'valkyries' if 'valkyries' in url else 'other'

            images = self.download_images(images, page_name)

            self.save_page_metadata(images, url, page_name)

            return images

        except Exception as e:
            self.logger.error(f"爬取页面失败 {url}: {str(e)}")
            return []

    def save_page_metadata(self, images: List[Dict], url: str, page_name: str) -> None:
        """Persist a page's crawl results as JSON (full) and CSV (images only).

        Args:
            images: Image-info dicts for the page.
            url: The crawled page URL.
            page_name: Name used in the output file names.
        """
        try:
            metadata = {
                'page_url': url,
                'page_name': page_name,
                'crawl_time': time.strftime('%Y-%m-%d %H:%M:%S'),
                'total_images': len(images),
                'successful_downloads': len([img for img in images if img.get('download_status') == 'success']),
                'images': images
            }

            # Full metadata as JSON.
            json_file = os.path.join(self.dirs['data'], f'{page_name}_metadata.json')
            save_metadata([metadata], json_file, 'json')

            # Flat per-image table as CSV.
            if images:
                csv_file = os.path.join(self.dirs['data'], f'{page_name}_images.csv')
                save_metadata(images, csv_file, 'csv')

            self.logger.info(f"元数据已保存: {json_file}")

        except Exception as e:
            self.logger.error(f"保存元数据失败: {str(e)}")

    def run(self) -> None:
        """Run the full crawl over all configured target URLs.

        Sets up the driver, crawls each URL in ``TARGET_URLS`` (failures on
        one URL don't stop the others), saves summary data, and always quits
        the driver on exit.

        Raises:
            Exception: re-raised after logging for fatal setup/run errors.
        """
        self.logger.info("=" * 50)
        self.logger.info("崩坏3官网图片爬虫开始运行")
        self.logger.info(f"目标URL: {TARGET_URLS}")
        self.logger.info(f"存储目录: {self.dirs['base']}")
        self.logger.info("=" * 50)

        try:
            self.driver = self.setup_driver()

            all_images = []

            for url in TARGET_URLS:
                try:
                    images = self.crawl_page(url)
                    all_images.extend(images)

                    # Polite delay between pages.
                    if CRAWLER_SETTINGS['delay_between_requests'] > 0:
                        time.sleep(CRAWLER_SETTINGS['delay_between_requests'])

                except Exception as e:
                    # One bad page must not abort the whole crawl.
                    self.logger.error(f"爬取URL失败 {url}: {str(e)}")
                    continue

            self.save_summary_data(all_images)

            self.logger.info("=" * 50)
            self.logger.info(f"爬虫运行完成！")
            self.logger.info(f"总共处理 {len(all_images)} 张图片")
            self.logger.info(f"成功下载 {len([img for img in all_images if img.get('download_status') == 'success'])} 张图片")
            self.logger.info(f"结果保存在: {self.dirs['base']}")
            self.logger.info("=" * 50)

        except Exception as e:
            self.logger.error(f"爬虫运行出错: {str(e)}")
            raise
        finally:
            # Always release the browser, even on failure.
            if self.driver:
                self.driver.quit()
                self.logger.info("浏览器驱动已关闭")

    def save_summary_data(self, all_images: List[Dict]) -> None:
        """Persist the whole-run summary as JSON plus an all-images CSV.

        Args:
            all_images: Every image-info dict collected across all pages.
        """
        try:
            summary = {
                'crawl_time': time.strftime('%Y-%m-%d %H:%M:%S'),
                'target_urls': TARGET_URLS,
                'total_images': len(all_images),
                'successful_downloads': len([img for img in all_images if img.get('download_status') == 'success']),
                'failed_downloads': len([img for img in all_images if img.get('download_status') == 'failed']),
                # Record effective settings so results are reproducible.
                'settings': {
                    'crawler_settings': CRAWLER_SETTINGS,
                    'image_settings': IMAGE_SETTINGS
                },
                'images': all_images
            }

            summary_file = os.path.join(self.dirs['data'], 'crawl_summary.json')
            save_metadata([summary], summary_file, 'json')

            if all_images:
                all_images_csv = os.path.join(self.dirs['data'], 'all_images.csv')
                save_metadata(all_images, all_images_csv, 'csv')

            self.logger.info(f"汇总数据已保存: {summary_file}")

        except Exception as e:
            self.logger.error(f"保存汇总数据失败: {str(e)}")

def main():
    """Entry point: build the crawler and run it to completion.

    Ctrl-C is reported as a user interruption; any other failure is
    printed and re-raised so the process exits non-zero.
    """
    try:
        BH3ImageCrawler().run()
    except KeyboardInterrupt:
        print("\n用户中断爬虫运行")
    except Exception as e:
        print(f"爬虫运行失败: {str(e)}")
        raise


if __name__ == '__main__':
    main()