#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
深度相册下载器
能够进入相册页面下载高清图片
"""

import requests
from bs4 import BeautifulSoup
import os
import sys
import time
import random
import logging
from urllib.parse import urljoin, urlparse
import urllib3
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import gzip
import io
import re

# 禁用SSL警告
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

class DeepAlbumDownloader:
    """Deep album downloader.

    Crawls a paginated tag/listing page, follows links that look like
    album pages, and downloads each album's full-size images into a
    per-album directory under ``download_dir``.

    Albums whose directory already exists on disk are skipped, so the
    downloader can be re-run incrementally.  Run statistics are kept in
    ``self.stats`` and reported at the end of :meth:`run`.
    """

    def __init__(self, base_url="https://jjcos.com/tag/EbKijoqVm671/", download_dir="downloads"):
        self.base_url = base_url
        self.download_dir = download_dir
        self.session = None
        self.logger = None
        self.setup_logging()
        self.setup_session()

        # Run statistics, summarized at the end of run().
        self.stats = {
            'pages_processed': 0,
            'albums_found': 0,
            'albums_processed': 0,
            'albums_skipped': 0,
            'images_found': 0,
            'images_downloaded': 0,
            'errors': 0
        }

        # URLs already visited this run, to avoid reprocessing.
        self.processed_urls = set()
        # Album directory names already present on disk (skip re-download).
        self.downloaded_albums = set()
        self.load_downloaded_albums()

    def setup_logging(self):
        """Configure logging to both a timestamped file and stdout."""
        # Create the logs directory if it does not exist yet.
        log_dir = "logs"
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)

        # One log file per run, named by start timestamp.
        timestamp = time.strftime("%Y%m%d_%H%M%S")
        log_file = os.path.join(log_dir, f"deep_album_downloader_{timestamp}.log")

        # NOTE: basicConfig affects the root logger; fine for a standalone
        # script, but would clash if this class were embedded in a larger app.
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s [%(levelname)s] %(message)s',
            handlers=[
                logging.FileHandler(log_file, encoding='utf-8'),
                logging.StreamHandler(sys.stdout)
            ]
        )
        self.logger = logging.getLogger(__name__)
        self.logger.info(f"日志文件: {log_file}")

    def setup_session(self):
        """Create the requests session with retries and browser-like headers."""
        self.session = requests.Session()

        # Retry transient server errors and rate limiting with backoff.
        retry_strategy = Retry(
            total=3,
            backoff_factor=1,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["HEAD", "GET", "OPTIONS"]
        )

        adapter = HTTPAdapter(max_retries=retry_strategy)
        self.session.mount("http://", adapter)
        self.session.mount("https://", adapter)

        # Headers mimic a real Chrome browser to avoid trivial bot blocking.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Cache-Control': 'max-age=0',
            'DNT': '1'
        })

    def _decode_body(self, raw_content, content_encoding):
        """Decompress (per *content_encoding*) and decode *raw_content* to text.

        Falls back to a lenient UTF-8 decode whenever decompression fails,
        and tries several common charsets when no compression header applies.
        """
        # NOTE(review): requests normally decompresses gzip/deflate
        # transparently before exposing response.content, so those branches
        # usually fail and fall through to the lenient decode — kept as-is
        # for behavioral parity with responses that arrive raw.
        if content_encoding == 'gzip':
            try:
                return gzip.decompress(raw_content).decode('utf-8')
            except Exception:
                return raw_content.decode('utf-8', errors='ignore')
        if content_encoding == 'deflate':
            try:
                import zlib
                return zlib.decompress(raw_content).decode('utf-8')
            except Exception:
                return raw_content.decode('utf-8', errors='ignore')
        if content_encoding == 'br':
            try:
                import brotli  # optional dependency; fallback if missing
                return brotli.decompress(raw_content).decode('utf-8')
            except Exception:
                return raw_content.decode('utf-8', errors='ignore')

        # No (recognized) compression: probe common charsets in order.
        for encoding in ('utf-8', 'gbk', 'gb2312', 'iso-8859-1'):
            try:
                return raw_content.decode(encoding)
            except UnicodeDecodeError:
                continue
        return raw_content.decode('utf-8', errors='ignore')

    def get_page_content(self, url, max_retries=3):
        """Fetch *url* and return its body as text.

        Retries up to *max_retries* times with exponential backoff; the
        last failure is re-raised to the caller.
        """
        for attempt in range(max_retries):
            try:
                self.logger.info(f"正在获取页面 (尝试 {attempt + 1}/{max_retries}): {url}")

                # Random delay between requests to look less like a bot.
                delay = random.uniform(1, 3)
                time.sleep(delay)

                response = self.session.get(
                    url,
                    timeout=30,
                    verify=False,
                    stream=True
                )
                response.raise_for_status()

                content_encoding = response.headers.get('content-encoding', '').lower()
                raw_content = response.content

                content = self._decode_body(raw_content, content_encoding)

                self.logger.info(f"成功获取页面，内容长度: {len(content)} 字符")
                return content

            except Exception as e:
                self.logger.error(f"获取页面失败 (尝试 {attempt + 1}/{max_retries}): {str(e)}")
                if attempt == max_retries - 1:
                    raise
                time.sleep(2 ** attempt)  # exponential backoff: 1s, 2s, ...

        return None

    def extract_album_links(self, content, base_url):
        """Return candidate album URLs extracted from an HTML listing page.

        A link is considered an album if its URL path, link text, or parent
        element's class matches any of several heuristics; obvious
        navigation/tag links are filtered out.  Deduplicated (order not
        preserved).
        """
        try:
            soup = BeautifulSoup(content, 'html.parser')
            album_links = []

            links = soup.find_all('a', href=True)

            for link in links:
                href = link.get('href', '')
                text = link.get_text(strip=True).lower()

                # Resolve relative hrefs against the page URL.
                full_url = urljoin(base_url, href)

                is_album = False

                # 1. Keywords in the URL path.
                if any(keyword in href.lower() for keyword in ['/post/', '/album/', '/gallery/', '/photo/', '/cosplay']):
                    is_album = True

                # 2. Keywords in the link text.
                elif any(keyword in text for keyword in ['cosplay', '相册', '图片', '照片', '画廊', 'set']):
                    is_album = True

                # 3. Model names / photo-set names in the link text.
                elif any(keyword in text for keyword in ['蠢沫沫', 'chunmomo', '套图', 'set.']):
                    is_album = True

                # 4. Album-like CSS classes on the parent element
                #    (checked independently of the branches above).
                parent = link.parent
                if parent and parent.get('class'):
                    parent_classes = ' '.join(parent.get('class', [])).lower()
                    if any(keyword in parent_classes for keyword in ['post', 'album', 'gallery', 'item']):
                        is_album = True

                if is_album and full_url not in self.processed_urls:
                    # Drop anchors, script links, mail links and site nav pages.
                    if not any(skip in href.lower() for skip in ['#', 'javascript:', 'mailto:', '/tag/', '/tags/', '/about', '/archives']):
                        album_links.append(full_url)

            # Deduplicate (set() does not preserve discovery order).
            album_links = list(set(album_links))
            self.logger.info(f"提取到 {len(album_links)} 个可能的相册链接")

            # Log a sample of up to 10 links for inspection.
            for i, link in enumerate(album_links[:10], 1):
                self.logger.info(f"  {i}. {link}")

            return album_links

        except Exception as e:
            self.logger.error(f"提取相册链接失败: {str(e)}")
            return []

    def extract_images_from_album(self, content, base_url):
        """Return image URLs found on an album page.

        Looks at <img> tags (preferring lazy-load / original-size
        attributes) and at <a> hrefs pointing directly at image files.
        Deduplicated (order not preserved).
        """
        try:
            soup = BeautifulSoup(content, 'html.parser')
            image_urls = []

            images = soup.find_all('img')

            for img in images:
                src = img.get('src', '')
                data_src = img.get('data-src', '')  # lazy-loaded image
                data_original = img.get('data-original', '')  # full-size original

                # Prefer the original, then lazy-load, then plain src.
                img_url = data_original or data_src or src

                if img_url:
                    full_url = urljoin(base_url, img_url)

                    # Skip site chrome: avatars, logos, icons, banners, ads.
                    if not any(skip in img_url.lower() for skip in ['avatar', 'logo', 'icon', 'banner', 'ad']):
                        # Only keep URLs with a recognizable image extension.
                        if any(ext in img_url.lower() for ext in ['.jpg', '.jpeg', '.png', '.gif', '.webp']):
                            image_urls.append(full_url)

            # Also pick up direct links to image files (often the HD version).
            links = soup.find_all('a', href=True)
            for link in links:
                href = link.get('href', '')
                if any(ext in href.lower() for ext in ['.jpg', '.jpeg', '.png', '.gif', '.webp']):
                    full_url = urljoin(base_url, href)
                    image_urls.append(full_url)

            # Deduplicate.
            image_urls = list(set(image_urls))
            self.logger.info(f"从相册页面提取到 {len(image_urls)} 个图片链接")

            return image_urls

        except Exception as e:
            self.logger.error(f"提取图片链接失败: {str(e)}")
            return []

    def download_image(self, img_url, filename, album_dir):
        """Download one image into ``download_dir/album_dir/filename``.

        Returns True on success; on failure logs the error, bumps the
        error counter, and returns False.
        """
        try:
            response = self.session.get(img_url, timeout=30, verify=False)
            response.raise_for_status()

            # Ensure the album directory exists.
            full_dir = os.path.join(self.download_dir, album_dir)
            os.makedirs(full_dir, exist_ok=True)

            filepath = os.path.join(full_dir, filename)
            with open(filepath, 'wb') as f:
                f.write(response.content)

            # BUGFIX: the log line previously printed a literal "(unknown)"
            # placeholder instead of the saved filename.
            self.logger.info(f"下载成功: {album_dir}/{filename}")
            self.stats['images_downloaded'] += 1
            return True

        except Exception as e:
            self.logger.error(f"下载失败 {img_url}: {str(e)}")
            self.stats['errors'] += 1
            return False

    def load_downloaded_albums(self):
        """Populate ``downloaded_albums`` from existing directories on disk."""
        try:
            if os.path.exists(self.download_dir):
                for item in os.listdir(self.download_dir):
                    item_path = os.path.join(self.download_dir, item)
                    # Hidden directories (".foo") are ignored.
                    if os.path.isdir(item_path) and not item.startswith('.'):
                        self.downloaded_albums.add(item)
                self.logger.info(f"发现已下载相册: {len(self.downloaded_albums)} 个")
        except Exception as e:
            self.logger.error(f"加载已下载相册列表失败: {str(e)}")

    def is_album_downloaded(self, album_title):
        """Return True if an album directory with this title already exists."""
        return album_title in self.downloaded_albums

    def process_album(self, album_url):
        """Fetch one album page, derive its title, and download its images."""
        try:
            if album_url in self.processed_urls:
                return

            self.processed_urls.add(album_url)
            self.logger.info(f"\n=== 处理相册: {album_url} ===")

            content = self.get_page_content(album_url)
            if not content:
                self.logger.error(f"无法获取相册页面内容: {album_url}")
                return

            # Use the page <title> as the album directory name.
            soup = BeautifulSoup(content, 'html.parser')
            title_elem = soup.find('title')
            if title_elem:
                album_title = title_elem.text.strip()
                # Strip characters illegal in Windows filenames.
                album_title = re.sub(r'[<>:"/\\|?*]', '_', album_title)
                album_title = album_title[:50]  # cap directory-name length
            else:
                album_title = f"album_{int(time.time())}"

            self.logger.info(f"相册标题: {album_title}")

            # Skip albums that were downloaded in an earlier run.
            if self.is_album_downloaded(album_title):
                self.logger.info(f"相册已存在，跳过下载: {album_title}")
                self.stats['albums_skipped'] += 1
                return

            image_urls = self.extract_images_from_album(content, album_url)

            if not image_urls:
                self.logger.warning(f"相册中没有找到图片: {album_url}")
                return

            self.stats['images_found'] += len(image_urls)

            for i, img_url in enumerate(image_urls, 1):
                # Build a numbered filename, falling back to a synthetic
                # name when the URL path has no usable basename.
                parsed_url = urlparse(img_url)
                original_filename = os.path.basename(parsed_url.path)
                if not original_filename or '.' not in original_filename:
                    original_filename = f"image_{i:03d}.jpg"

                filename = f"{i:03d}_{original_filename}"

                self.download_image(img_url, filename, album_title)

                # Throttle between images to avoid getting banned.
                time.sleep(random.uniform(0.5, 1.5))

            # Mark the album as done so a later pass skips it.
            self.downloaded_albums.add(album_title)
            self.stats['albums_processed'] += 1
            self.logger.info(f"相册处理完成: {album_title} ({len(image_urls)} 张图片)")

        except Exception as e:
            self.logger.error(f"处理相册失败 {album_url}: {str(e)}")
            self.stats['errors'] += 1

    def process_page(self, page_url):
        """Fetch one listing page and return the album links it contains."""
        try:
            self.logger.info(f"\n=== 处理页面: {page_url} ===")

            content = self.get_page_content(page_url)
            if not content:
                self.logger.error(f"无法获取页面内容: {page_url}")
                return []

            album_links = self.extract_album_links(content, page_url)
            self.logger.info(f"页面发现相册: {len(album_links)} 个")

            return album_links

        except Exception as e:
            self.logger.error(f"处理页面失败 {page_url}: {str(e)}")
            return []

    def run(self):
        """Crawl all listing pages, then process every discovered album."""
        try:
            self.logger.info(f"开始深度处理: {self.base_url}")

            all_album_links = []
            page_num = 1

            # First page uses the base URL directly (no /page/N/ suffix).
            first_page_albums = self.process_page(self.base_url)
            if first_page_albums:
                all_album_links.extend(first_page_albums)
                self.stats['pages_processed'] += 1

            # Follow /page/2/, /page/3/, ... until a page is missing or empty.
            while True:
                page_num += 1
                if self.base_url.endswith('/'):
                    page_url = f"{self.base_url}page/{page_num}/"
                else:
                    page_url = f"{self.base_url}/page/{page_num}/"

                self.logger.info(f"\n尝试获取第 {page_num} 页: {page_url}")

                try:
                    # Probe for existence first (404 ends pagination).
                    # NOTE(review): this fetches the page once here and again
                    # inside process_page — wasteful but harmless.
                    response = self.session.get(page_url, timeout=30, verify=False)
                    if response.status_code == 404:
                        self.logger.info(f"第 {page_num} 页不存在 (404)，停止分页处理")
                        break
                    elif response.status_code != 200:
                        self.logger.warning(f"第 {page_num} 页返回状态码: {response.status_code}")
                        break

                    page_albums = self.process_page(page_url)
                    if not page_albums:
                        self.logger.info(f"第 {page_num} 页没有找到相册，可能已到最后一页")
                        break

                    all_album_links.extend(page_albums)
                    self.stats['pages_processed'] += 1

                    # Throttle between listing pages.
                    time.sleep(random.uniform(3, 5))

                except Exception as e:
                    self.logger.error(f"处理第 {page_num} 页失败: {str(e)}")
                    break

            # Deduplicate album links across all pages.
            unique_albums = list(set(all_album_links))
            self.stats['albums_found'] = len(unique_albums)

            self.logger.info(f"\n总共发现 {len(unique_albums)} 个唯一相册")

            if not unique_albums:
                self.logger.warning("没有找到相册链接")
                return

            for i, album_url in enumerate(unique_albums, 1):
                self.logger.info(f"\n相册处理进度: {i}/{len(unique_albums)}")
                self.process_album(album_url)

                # Throttle between albums to avoid getting banned.
                time.sleep(random.uniform(2, 4))

            # Final summary.
            self.logger.info("\n=== 深度下载完成 ===")
            self.logger.info(f"处理页面: {self.stats['pages_processed']} 页")
            self.logger.info(f"发现相册: {self.stats['albums_found']} 个")
            self.logger.info(f"处理相册: {self.stats['albums_processed']} 个")
            self.logger.info(f"跳过相册: {self.stats['albums_skipped']} 个")
            self.logger.info(f"发现图片: {self.stats['images_found']} 张")
            self.logger.info(f"下载成功: {self.stats['images_downloaded']} 张")
            self.logger.info(f"错误数量: {self.stats['errors']} 个")

        except Exception as e:
            self.logger.error(f"运行失败: {str(e)}")
            import traceback
            self.logger.error(traceback.format_exc())

def main():
    """Entry point: build a downloader with default settings and start it."""
    DeepAlbumDownloader().run()


if __name__ == "__main__":
    main()