# Honkai Impact 3rd official-site crawler: utility functions

import os
import re
import json
import time
import logging
import requests
from urllib.parse import urljoin, urlparse
from PIL import Image
from pathlib import Path
from typing import List, Dict, Optional, Tuple

def setup_logging(log_dir: str = 'logs', level: str = 'INFO') -> logging.Logger:
    """Configure and return the shared 'bh3_crawler' logger.

    Creates *log_dir* if needed and attaches a timestamped UTF-8 file
    handler (always DEBUG) plus a console handler at *level*.

    Args:
        log_dir: Directory where the log file is written.
        level: Console log level name, e.g. 'INFO' or 'DEBUG'.

    Returns:
        The configured logger (one instance per process).
    """
    os.makedirs(log_dir, exist_ok=True)

    logger = logging.getLogger('bh3_crawler')
    # The logger itself must pass DEBUG records, otherwise the DEBUG
    # file handler below never receives anything when level='INFO'.
    logger.setLevel(logging.DEBUG)

    # Guard against duplicate handlers on repeated calls — the original
    # appended new handlers every time, duplicating every log line.
    if logger.handlers:
        return logger

    # File handler: full DEBUG detail, timestamped filename.
    log_file = os.path.join(log_dir, f'crawler_{time.strftime("%Y%m%d_%H%M%S")}.log')
    file_handler = logging.FileHandler(log_file, encoding='utf-8')
    file_handler.setLevel(logging.DEBUG)

    # Console handler: honors the caller-requested level.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(getattr(logging, level.upper()))

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler.setFormatter(formatter)
    console_handler.setFormatter(formatter)

    logger.addHandler(file_handler)
    logger.addHandler(console_handler)

    return logger

def create_directory_structure(base_dir: str) -> Dict[str, str]:
    """Build the standard crawler directory layout under *base_dir*.

    Returns:
        Mapping from logical name ('base', 'images', 'data', 'logs',
        'main', 'valkyries') to the created directory path.
    """
    images_dir = os.path.join(base_dir, 'images')
    layout = {
        'base': base_dir,
        'images': images_dir,
        'data': os.path.join(base_dir, 'data'),
        'logs': os.path.join(base_dir, 'logs'),
        'main': os.path.join(images_dir, 'main'),
        'valkyries': os.path.join(images_dir, 'valkyries'),
    }

    # Create every directory up front so later writes never fail on
    # a missing parent.
    for path in layout.values():
        os.makedirs(path, exist_ok=True)

    return layout

def is_valid_image_url(url: str, supported_formats: List[str]) -> bool:
    """Return True when *url* ends in one of *supported_formats*.

    The query string is ignored and the comparison is done against the
    lower-cased URL, so extensions like '.JPG' match '.jpg'.
    """
    if not isinstance(url, str) or not url:
        return False

    # Strip everything after '?' and normalize case before matching.
    path_part = url.split('?', 1)[0].lower()
    return path_part.endswith(tuple(supported_formats))

def clean_filename(filename: str) -> str:
    """Sanitize *filename* for use on common filesystems.

    Characters illegal on Windows (<>:"/\\|?*) become underscores, and
    names longer than 200 characters are truncated at the stem so the
    extension is preserved.
    """
    sanitized = re.sub(r'[<>:"/\\|?*]', '_', filename)

    # Short enough already — nothing more to do.
    if len(sanitized) <= 200:
        return sanitized

    # Trim the stem, keep the extension intact.
    stem, ext = os.path.splitext(sanitized)
    return stem[:200 - len(ext)] + ext

def get_image_info(image_path: str) -> Optional[Dict]:
    """Return basic metadata for the image at *image_path*.

    Returns:
        Dict with 'width', 'height', 'format', 'mode' and 'size_bytes',
        or None when the file cannot be opened as an image.
    """
    try:
        with Image.open(image_path) as img:
            info = {
                'width': img.width,
                'height': img.height,
                'format': img.format,
                'mode': img.mode,
            }
        # File size does not need the image handle to be open.
        info['size_bytes'] = os.path.getsize(image_path)
        return info
    except Exception:
        return None

def download_image(url: str, save_path: str, headers: Dict = None,
                  max_size: int = 10*1024*1024, timeout: int = 30) -> Tuple[bool, str]:
    """Download an image from *url* to *save_path* with size limits.

    Args:
        url: Image URL to fetch.
        save_path: Local destination path; parent dirs are created.
        headers: Optional request headers; a default User-Agent is used
            when None.
        max_size: Maximum allowed size in bytes (header-declared and
            actually downloaded).
        timeout: Request timeout in seconds.

    Returns:
        Tuple of (success, message).
    """
    try:
        if headers is None:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
            }

        # 'with' closes the streamed response deterministically — the
        # original never closed it and leaked the connection.
        with requests.get(url, headers=headers, timeout=timeout, stream=True) as response:
            response.raise_for_status()

            # Reject non-image payloads early.
            content_type = response.headers.get('content-type', '').lower()
            if not content_type.startswith('image/'):
                return False, f"不是图片类型: {content_type}"

            # Reject files the server already declares as too large.
            content_length = response.headers.get('content-length')
            if content_length and int(content_length) > max_size:
                return False, f"文件过大: {content_length} bytes"

            # os.makedirs('') raises, so only create a real parent dir
            # (the original failed for bare filenames).
            save_dir = os.path.dirname(save_path)
            if save_dir:
                os.makedirs(save_dir, exist_ok=True)

            downloaded = 0
            too_large = False
            try:
                with open(save_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:
                            f.write(chunk)
                            downloaded += len(chunk)
                            if downloaded > max_size:
                                too_large = True
                                break
            except Exception:
                # Don't leave a partial file behind on a mid-stream
                # failure (the original did).
                if os.path.exists(save_path):
                    os.remove(save_path)
                raise

            if too_large:
                os.remove(save_path)
                return False, f"下载过程中文件过大: {downloaded} bytes"

        return True, "下载成功"

    except requests.exceptions.RequestException as e:
        return False, f"网络错误: {str(e)}"
    except Exception as e:
        return False, f"下载错误: {str(e)}"

def save_metadata(data: List[Dict], file_path: str, format_type: str = 'json') -> bool:
    """Persist crawl metadata to *file_path* as JSON or CSV.

    Args:
        data: List of record dicts to save.
        file_path: Destination path; parent dirs are created as needed.
        format_type: 'json' or 'csv' (case-insensitive).

    Returns:
        True on success, False on any failure (the error is logged).
    """
    try:
        # os.makedirs('') raises, so only create the parent when the
        # path actually has a directory component — the original
        # silently failed for bare filenames like 'meta.json'.
        parent = os.path.dirname(file_path)
        if parent:
            os.makedirs(parent, exist_ok=True)

        fmt = format_type.lower()
        if fmt == 'json':
            with open(file_path, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
        elif fmt == 'csv':
            import pandas as pd
            # utf-8-sig writes a BOM so Excel renders non-ASCII text.
            pd.DataFrame(data).to_csv(file_path, index=False, encoding='utf-8-sig')
        else:
            raise ValueError(f"不支持的格式: {format_type}")

        return True
    except Exception as e:
        logging.error(f"保存元数据失败: {str(e)}")
        return False

def extract_domain_from_url(url: str) -> str:
    """Return the host of *url* with a leading 'www.' removed.

    Falls back to 'unknown' when the URL cannot be parsed.
    """
    try:
        netloc = urlparse(url).netloc
    except Exception:  # narrowed from bare 'except:' — keep Ctrl-C working
        return 'unknown'

    # str.replace('www.', '') also mangled hosts merely *containing*
    # 'www.' (e.g. 'mywww.example.com' -> 'myexample.com'); strip only
    # a leading prefix.
    if netloc.startswith('www.'):
        return netloc[4:]
    return netloc

def normalize_url(url: str, base_url: str) -> str:
    """Resolve *url* against *base_url* into an absolute URL.

    Protocol-relative URLs ('//host/...') are pinned to https; empty
    input yields ''.
    """
    if not url:
        return ''

    # Protocol-relative: force https regardless of the base scheme.
    if url.startswith('//'):
        return f'https:{url}'

    # Already absolute: pass through unchanged.
    if url.startswith(('http://', 'https://')):
        return url

    # Relative path (root-relative or document-relative).
    return urljoin(base_url, url)

def generate_filename_from_url(url: str, index: int = None) -> str:
    """Derive a safe local filename from *url*.

    Uses the URL path's basename when it contains an extension;
    otherwise synthesizes '<domain>_<timestamp>[_<index>].jpg'.

    Args:
        url: Source image URL.
        index: Optional sequence number to keep generated names unique.

    Returns:
        A sanitized filename, never raises.
    """
    suffix = f"_{index}" if index is not None else ""
    try:
        path = urlparse(url).path

        if path and path != '/':
            basename = os.path.basename(path)
            if basename and '.' in basename:
                return clean_filename(basename)

        # No usable basename: synthesize one from domain + timestamp.
        return f"{extract_domain_from_url(url)}_{int(time.time())}{suffix}.jpg"

    except Exception:
        # Narrowed from a bare 'except:' which would also have swallowed
        # KeyboardInterrupt/SystemExit.
        return f"image_{int(time.time())}{suffix}.jpg"

def resize_image_if_needed(image_path: str, max_width: int = 2048, max_height: int = 2048) -> bool:
    """Shrink the image in place when either dimension exceeds its limit.

    Returns:
        True when the image was resized and re-saved, False when it was
        already small enough or could not be processed (error logged).
    """
    try:
        with Image.open(image_path) as img:
            if img.width <= max_width and img.height <= max_height:
                return False
            # thumbnail() preserves aspect ratio and modifies in place.
            img.thumbnail((max_width, max_height), Image.Resampling.LANCZOS)
            img.save(image_path, optimize=True, quality=85)
            return True
    except Exception as e:
        logging.error(f"调整图片大小失败 {image_path}: {str(e)}")
        return False

def validate_image_file(file_path: str, min_size: int = 1024) -> bool:
    """Check that *file_path* is a plausible, non-corrupt image file.

    Args:
        file_path: Path of the candidate image.
        min_size: Minimum byte size; smaller files are rejected as
            placeholders or error pages.

    Returns:
        True when the file exists, is at least *min_size* bytes, and
        passes PIL's integrity check; False otherwise.
    """
    try:
        if not os.path.exists(file_path):
            return False

        # Reject suspiciously tiny files before decoding anything.
        if os.path.getsize(file_path) < min_size:
            return False

        # verify() checks integrity without decoding full pixel data;
        # the handle is unusable afterwards, which is fine here.
        with Image.open(file_path) as img:
            img.verify()

        return True
    except Exception:
        # Narrowed from a bare 'except:' so Ctrl-C is not swallowed.
        return False