#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
相册网站图片下载器
支持下载指定网站的相册和图片，包含分页处理
作者: AI Assistant
创建时间: 2025
"""

import concurrent.futures
import logging
import os
import random
import re
import sys
import time
import urllib.parse
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Set

import requests
import urllib3
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter
from tqdm import tqdm
from urllib3.util.retry import Retry

# Suppress InsecureRequestWarning noise: all requests below use verify=False.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

class AlbumDownloader:
    """Album-site image downloader.

    Crawls listing pages of an album website, follows pagination links,
    extracts per-album pages and image URLs, and downloads each album's
    images into its own sub-directory under ``download_dir``.
    """

    def __init__(self, base_url: str, download_dir: str = "downloads"):
        """
        Initialize the downloader.

        Args:
            base_url: Base URL of the album website to crawl.
            download_dir: Directory to save albums into. Created (including
                missing parent directories) if it does not exist.
        """
        self.base_url = base_url
        self.download_dir = Path(download_dir)
        # parents=True: callers may pass a nested path such as
        # "downloads/site_albums"; without it, mkdir raises FileNotFoundError
        # when the parent directory does not exist yet.
        self.download_dir.mkdir(parents=True, exist_ok=True)

        # Logging must be configured before any other method runs.
        self.setup_logger()

        # HTTP session with retry policy and browser-like headers.
        self.session = self.create_session()

        # Image URLs already fetched in this run, to avoid duplicate downloads.
        self.downloaded_urls: Set[str] = set()

        # Run counters reported by print_stats().
        self.stats = {
            'albums_found': 0,
            'images_downloaded': 0,
            'images_skipped': 0,
            'errors': 0
        }

    def setup_logger(self) -> None:
        """Configure a dedicated logger writing to a timestamped file and the console."""
        log_dir = Path("logs")
        log_dir.mkdir(exist_ok=True)

        log_filename = log_dir / f'album_downloader_{datetime.now().strftime("%Y%m%d_%H%M%S")}.log'

        formatter = logging.Formatter(
            '%(asctime)s [%(levelname)s] %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S'
        )

        # File handler (UTF-8 so Chinese log messages are written correctly).
        file_handler = logging.FileHandler(log_filename, encoding='utf-8')
        file_handler.setFormatter(formatter)

        # Console handler mirrors everything to stderr.
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)

        self.logger = logging.getLogger('AlbumDownloader')
        self.logger.setLevel(logging.INFO)
        # Clear stale handlers so repeated instantiation does not duplicate output.
        self.logger.handlers.clear()
        self.logger.addHandler(file_handler)
        self.logger.addHandler(console_handler)

    def create_session(self) -> "requests.Session":
        """Create a requests session with automatic retries and browser-like headers."""
        session = requests.Session()

        # Retry transient server / rate-limit errors with exponential backoff.
        retry_strategy = Retry(
            total=3,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["HEAD", "GET", "OPTIONS"],
            backoff_factor=1
        )

        adapter = HTTPAdapter(max_retries=retry_strategy)
        session.mount("http://", adapter)
        session.mount("https://", adapter)

        # Mimic a real Chrome browser to reduce the chance of being blocked.
        session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Cache-Control': 'max-age=0'
        })

        return session

    def clean_filename(self, filename: str, is_folder: bool = False) -> str:
        """Sanitize *filename* for safe use on disk.

        Replaces illegal characters (a stricter set for folders) with
        underscores, collapses whitespace runs, caps the length at 100
        characters, and falls back to a placeholder for empty names.
        """
        invalid_chars = '<>:"/\\|?*\n\r\t'
        if is_folder:
            # Folder names additionally drop brackets/punctuation to keep paths tidy.
            invalid_chars += '[]()（）《》【】『』「」［］〈〉{}！!@#$%^&*=+'

        for char in invalid_chars:
            filename = filename.replace(char, '_')

        # Collapse whitespace runs into single underscores and limit length.
        filename = re.sub(r'\s+', '_', filename.strip())
        if len(filename) > 100:
            filename = filename[:100]

        # Guarantee a non-empty name.
        if not filename:
            filename = 'untitled_folder' if is_folder else 'untitled'

        return filename

    def get_page_content(self, url: str) -> Optional["BeautifulSoup"]:
        """Fetch *url* and parse it.

        Returns:
            Parsed BeautifulSoup document, or None on any failure
            (the error counter is incremented in that case).
        """
        try:
            self.logger.info(f"正在获取页面: {url}")

            # Random delay so request timing looks less like a bot.
            time.sleep(random.uniform(1, 3))

            response = self.session.get(url, timeout=30, verify=False)
            response.raise_for_status()

            # requests defaults to ISO-8859-1 when the server omits a charset;
            # fall back to the content-sniffed encoding in that case.
            if response.encoding == 'ISO-8859-1':
                response.encoding = response.apparent_encoding

            return BeautifulSoup(response.text, 'html.parser')

        except Exception as e:
            self.logger.error(f"获取页面失败 {url}: {str(e)}")
            self.stats['errors'] += 1
            return None

    def extract_album_links(self, soup: "BeautifulSoup", base_url: str) -> List[Dict[str, str]]:
        """Extract candidate album links from a listing page.

        Returns:
            De-duplicated list of ``{'url': ..., 'title': ...}`` dicts,
            with titles already sanitized for use as folder names.
        """
        albums = []

        # Heuristic selectors; adjust to the target site's actual HTML structure.
        album_selectors = [
            'a[href*="album"]',
            'a[href*="gallery"]',
            'a[href*="set"]',
            '.album-item a',
            '.gallery-item a',
            '.thumbnail a',
            'a img',  # links wrapping an image
        ]

        for selector in album_selectors:
            for link in soup.select(selector):
                href = link.get('href')
                if not href:
                    continue
                full_url = urllib.parse.urljoin(base_url, href)

                # Title preference: title attribute > img alt > link text > fallback.
                img = link.find('img')
                if link.get('title'):
                    title = link['title']
                elif img and img.get('alt'):
                    title = img['alt']
                elif link.text.strip():
                    title = link.text.strip()
                else:
                    title = f"album_{len(albums) + 1}"

                albums.append({
                    'url': full_url,
                    'title': self.clean_filename(title, is_folder=True)
                })

        # De-duplicate by URL, preserving first-seen order.
        seen_urls = set()
        unique_albums = []
        for album in albums:
            if album['url'] not in seen_urls:
                seen_urls.add(album['url'])
                unique_albums.append(album)

        return unique_albums

    def extract_image_urls(self, soup: "BeautifulSoup", base_url: str) -> List[str]:
        """Collect candidate image URLs from <img> tags and direct file links."""
        image_urls = set()

        # <img> tags: check src plus common lazy-loading attributes.
        for img in soup.find_all('img'):
            for attr in ['src', 'data-src', 'data-original', 'data-original-src', 'data-lazy-src']:
                url = img.get(attr)
                if url:
                    full_url = urllib.parse.urljoin(base_url, url)
                    # Skip URLs that are clearly not content images.
                    if self.is_valid_image_url(full_url):
                        image_urls.add(full_url)

        # <a> tags linking directly to image files (often the full-size version).
        for link in soup.find_all('a'):
            href = link.get('href', '')
            if any(ext in href.lower() for ext in ['.jpg', '.jpeg', '.png', '.gif', '.webp']):
                full_url = urllib.parse.urljoin(base_url, href)
                if self.is_valid_image_url(full_url):
                    image_urls.add(full_url)

        return list(image_urls)

    def is_valid_image_url(self, url: str) -> bool:
        """Heuristically decide whether *url* points at a content image.

        Requires an image file extension, and rejects URLs containing
        chrome/asset keywords (logo, icon, ...) or small-size suffixes.
        """
        exclude_patterns = [
            'avatar', 'icon', 'logo', 'banner', 'ad', 'thumb',
            'button', 'bg', 'background', 'header', 'footer'
        ]

        url_lower = url.lower()

        # Must contain an image file extension.
        if not any(ext in url_lower for ext in ['.jpg', '.jpeg', '.png', '.gif', '.webp']):
            return False

        # Reject page-chrome / advertising assets.
        if any(pattern in url_lower for pattern in exclude_patterns):
            return False

        # Reject small-size variants (thumbnails).
        if any(size in url_lower for size in ['_s.', '_xs.', '_thumb.', '_small.']):
            return False

        return True

    def download_image(self, url: str, save_path: Path) -> bool:
        """Download a single image to *save_path*.

        Returns:
            True on success (or if the URL was already downloaded this run);
            False when the response is not an image or on any error.
        """
        try:
            if url in self.downloaded_urls:
                self.stats['images_skipped'] += 1
                return True

            # Stream the body; the context manager guarantees the connection
            # is released even on the early non-image return path.
            with self.session.get(url, timeout=30, stream=True, verify=False) as response:
                response.raise_for_status()

                content_type = response.headers.get('content-type', '')
                if not content_type.startswith('image/'):
                    self.logger.warning(f"URL不是图片: {url}")
                    return False

                save_path.parent.mkdir(parents=True, exist_ok=True)

                # Write in chunks to keep memory use flat for large files.
                with open(save_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:
                            f.write(chunk)

            self.downloaded_urls.add(url)
            self.stats['images_downloaded'] += 1
            self.logger.info(f"下载成功: {save_path.name}")
            return True

        except Exception as e:
            self.logger.error(f"下载图片失败 {url}: {str(e)}")
            self.stats['errors'] += 1
            return False

    def download_album(self, album_url: str, album_title: str) -> None:
        """Download every image found on a single album page into its own folder."""
        self.logger.info(f"开始下载相册: {album_title}")

        # parents=True mirrors __init__: tolerate a missing download root.
        album_dir = self.download_dir / album_title
        album_dir.mkdir(parents=True, exist_ok=True)

        soup = self.get_page_content(album_url)
        if not soup:
            return

        image_urls = self.extract_image_urls(soup, album_url)

        if not image_urls:
            self.logger.warning(f"相册中未找到图片: {album_title}")
            return

        self.logger.info(f"找到 {len(image_urls)} 张图片")

        with tqdm(total=len(image_urls), desc=f"下载 {album_title}") as pbar:
            for i, img_url in enumerate(image_urls, 1):
                # Sequential filename; keep the source extension, default .jpg.
                file_ext = os.path.splitext(urllib.parse.urlparse(img_url).path)[1] or '.jpg'
                save_path = album_dir / f"{i:03d}{file_ext}"

                # Skip files already on disk so interrupted runs can resume.
                if save_path.exists():
                    self.stats['images_skipped'] += 1
                    pbar.update(1)
                    continue

                self.download_image(img_url, save_path)
                pbar.update(1)

                # Throttle to avoid hammering the server.
                time.sleep(0.5)

    def get_pagination_urls(self, soup: "BeautifulSoup", base_url: str) -> List[str]:
        """Extract pagination links from a listing page (order-preserving, unique)."""
        pagination_urls = []

        # Common pagination selectors across typical gallery sites.
        pagination_selectors = [
            '.pagination a',
            '.page-numbers a',
            '.pager a',
            'a[href*="page"]',
            'a[href*="p="]',
            '.next',
            '.page-next'
        ]

        for selector in pagination_selectors:
            for link in soup.select(selector):
                href = link.get('href')
                if href:
                    full_url = urllib.parse.urljoin(base_url, href)
                    if full_url not in pagination_urls:
                        pagination_urls.append(full_url)

        return pagination_urls

    def download_all_albums(self, max_pages: int = 10) -> None:
        """Crawl listing pages (breadth-first, up to *max_pages*) and download every album found."""
        self.logger.info(f"开始下载相册网站: {self.base_url}")

        visited_urls = set()
        urls_to_visit = [self.base_url]
        page_count = 0

        while urls_to_visit and page_count < max_pages:
            current_url = urls_to_visit.pop(0)

            if current_url in visited_urls:
                continue

            visited_urls.add(current_url)
            page_count += 1

            self.logger.info(f"处理第 {page_count} 页: {current_url}")

            soup = self.get_page_content(current_url)
            if not soup:
                continue

            albums = self.extract_album_links(soup, current_url)
            self.stats['albums_found'] += len(albums)

            self.logger.info(f"找到 {len(albums)} 个相册")

            for album in albums:
                self.download_album(album['url'], album['title'])
                time.sleep(1)  # be polite between albums

            # Queue new pagination links while under the page budget.
            if page_count < max_pages:
                for url in self.get_pagination_urls(soup, current_url):
                    if url not in visited_urls and url not in urls_to_visit:
                        urls_to_visit.append(url)

        self.print_stats()

    def print_stats(self) -> None:
        """Log a summary of the run's counters."""
        self.logger.info("=" * 50)
        self.logger.info("下载完成！统计信息:")
        self.logger.info(f"发现相册数量: {self.stats['albums_found']}")
        self.logger.info(f"下载图片数量: {self.stats['images_downloaded']}")
        self.logger.info(f"跳过图片数量: {self.stats['images_skipped']}")
        self.logger.info(f"错误数量: {self.stats['errors']}")
        self.logger.info("=" * 50)


def main():
    """Script entry point: crawl the target site and download every album found."""
    # Hard-coded crawl target.
    target_url = "https://jjcos.com/tag/EbKijoqVm671/"

    dl = AlbumDownloader(target_url, "downloads/jjcos_albums")

    try:
        # Cap the crawl at 20 listing pages.
        dl.download_all_albums(max_pages=20)
    except KeyboardInterrupt:
        dl.logger.info("用户中断下载")
    except Exception as e:
        dl.logger.error(f"下载过程中发生错误: {str(e)}")
    finally:
        # Always report the run summary, even after an interrupt/error.
        dl.print_stats()


if __name__ == "__main__":
    main()