#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
修复版相册下载器
处理压缩内容和编码问题
"""

import requests
from bs4 import BeautifulSoup
import os
import sys
import time
import random
import logging
from urllib.parse import urljoin, urlparse
import urllib3
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import gzip
import io

# 禁用SSL警告
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

class AlbumDownloader:
    """Download images from an album/tag page, coping with compressed
    response bodies and uncertain text encodings.

    Attributes:
        base_url: Page URL that is scraped for album links and images.
        download_dir: Directory where downloaded images are written.
        session: Shared ``requests.Session`` with retry + browser headers.
        logger: Logger writing to a timestamped file and to stdout.
        stats: Running counters: 'found', 'downloaded', 'errors'.
    """

    def __init__(self, base_url="https://jjcos.com/tag/EbKijoqVm671/", download_dir="downloads"):
        """Initialize logging, the HTTP session, and counters.

        Args:
            base_url: Album/tag page to scrape.
            download_dir: Target directory for downloaded images.
        """
        self.base_url = base_url
        self.download_dir = download_dir
        self.session = None
        self.logger = None
        self.setup_logging()
        self.setup_session()

        # Running counters reported at the end of run().
        self.stats = {
            'found': 0,       # <img> tags with a usable src
            'downloaded': 0,  # files written successfully
            'errors': 0       # failed downloads
        }

    def setup_logging(self):
        """Configure logging to a timestamped file plus stdout."""
        log_dir = "logs"
        # exist_ok avoids the race between an exists() check and makedirs().
        os.makedirs(log_dir, exist_ok=True)

        # One log file per run, keyed by start time.
        timestamp = time.strftime("%Y%m%d_%H%M%S")
        log_file = os.path.join(log_dir, f"fixed_album_downloader_{timestamp}.log")

        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s [%(levelname)s] %(message)s',
            handlers=[
                logging.FileHandler(log_file, encoding='utf-8'),
                logging.StreamHandler(sys.stdout)
            ]
        )
        self.logger = logging.getLogger(__name__)
        self.logger.info(f"日志文件: {log_file}")

    def setup_session(self):
        """Create a requests session with retries and browser-like headers."""
        self.session = requests.Session()

        # Retry transient failures (rate limiting / 5xx) with backoff.
        retry_strategy = Retry(
            total=3,
            backoff_factor=1,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["HEAD", "GET", "OPTIONS"]
        )

        adapter = HTTPAdapter(max_retries=retry_strategy)
        self.session.mount("http://", adapter)
        self.session.mount("https://", adapter)

        # Mimic a real Chrome browser to reduce the chance of being blocked.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Cache-Control': 'max-age=0',
            'DNT': '1'
        })

    def _decode_body(self, raw_content, content_encoding):
        """Decode a raw response body, decompressing first when needed.

        NOTE(review): requests normally decompresses gzip/deflate (and br,
        when brotli is installed) transparently, so the explicit decompress
        below usually fails and falls through to plain decoding — kept as a
        safety net for servers that mislabel Content-Encoding.

        Args:
            raw_content: Raw bytes of the response body.
            content_encoding: Lower-cased ``Content-Encoding`` header value.

        Returns:
            Decoded text; never None (last resort is lossy UTF-8 decoding).
        """
        if content_encoding == 'gzip':
            try:
                text = gzip.decompress(raw_content).decode('utf-8')
                self.logger.info("成功解压gzip内容")
                return text
            except Exception as e:
                self.logger.warning(f"gzip解压失败: {e}，尝试直接解码")
                return raw_content.decode('utf-8', errors='ignore')

        if content_encoding == 'deflate':
            try:
                import zlib
                text = zlib.decompress(raw_content).decode('utf-8')
                self.logger.info("成功解压deflate内容")
                return text
            except Exception as e:
                self.logger.warning(f"deflate解压失败: {e}，尝试直接解码")
                return raw_content.decode('utf-8', errors='ignore')

        if content_encoding == 'br':
            try:
                import brotli  # optional third-party dependency
                text = brotli.decompress(raw_content).decode('utf-8')
                self.logger.info("成功解压brotli内容")
                return text
            except Exception as e:
                self.logger.warning(f"brotli解压失败: {e}，尝试直接解码")
                return raw_content.decode('utf-8', errors='ignore')

        # Uncompressed body: try likely encodings in priority order.
        for encoding in ('utf-8', 'gbk', 'gb2312', 'iso-8859-1'):
            try:
                text = raw_content.decode(encoding)
                self.logger.info(f"成功使用 {encoding} 编码解码")
                return text
            except UnicodeDecodeError:
                continue

        # Nothing decoded cleanly — fall back to lossy UTF-8.
        self.logger.warning("使用忽略错误的UTF-8解码")
        return raw_content.decode('utf-8', errors='ignore')

    def get_page_content(self, url, max_retries=3):
        """Fetch a page and return its decoded HTML text.

        Also dumps the decoded page to ``debug_page_<ts>.html`` for
        offline inspection.

        Args:
            url: Page URL to fetch.
            max_retries: Attempts before giving up (exponential backoff).

        Returns:
            Decoded page text, or None if ``max_retries`` is 0.

        Raises:
            Exception: Re-raises the last fetch error once retries are
                exhausted.
        """
        for attempt in range(max_retries):
            try:
                self.logger.info(f"正在获取页面 (尝试 {attempt + 1}/{max_retries}): {url}")

                # Random delay to look less like an automated client.
                time.sleep(random.uniform(1, 3))

                response = self.session.get(
                    url,
                    timeout=30,
                    verify=False,  # site uses certs we cannot verify
                    stream=True
                )
                response.raise_for_status()

                content_encoding = response.headers.get('content-encoding', '').lower()
                self.logger.info(f"内容编码: {content_encoding}")
                self.logger.info(f"内容类型: {response.headers.get('content-type', 'unknown')}")

                content = self._decode_body(response.content, content_encoding)

                self.logger.info(f"成功获取页面，内容长度: {len(content)} 字符")

                # Save the decoded HTML for debugging selector issues.
                debug_file = f"debug_page_{int(time.time())}.html"
                with open(debug_file, 'w', encoding='utf-8') as f:
                    f.write(content)
                self.logger.info(f"页面内容已保存到: {debug_file}")

                return content

            except Exception as e:
                self.logger.error(f"获取页面失败 (尝试 {attempt + 1}/{max_retries}): {str(e)}")
                if attempt == max_retries - 1:
                    raise
                time.sleep(2 ** attempt)  # exponential backoff

        return None

    def parse_page(self, content):
        """Parse page HTML, log a summary, and extract album link candidates.

        Args:
            content: Decoded page HTML.

        Returns:
            Tuple ``(album_links, images)`` — de-duplicated absolute URLs
            that look album-related, and all ``<img>`` tags found. Both
            lists are empty if parsing fails.
        """
        try:
            soup = BeautifulSoup(content, 'html.parser')

            title = soup.find('title')
            if title:
                self.logger.info(f"页面标题: {title.text.strip()}")

            links = soup.find_all('a', href=True)
            self.logger.info(f"找到 {len(links)} 个链接")

            images = soup.find_all('img')
            self.logger.info(f"找到 {len(images)} 个图片")

            # Log a sample of what was found to aid debugging.
            self.logger.info("前10个链接:")
            for i, link in enumerate(links[:10], 1):
                href = link.get('href', '')
                text = link.get_text(strip=True)[:50]
                self.logger.info(f"  {i}. {text} -> {href}")

            self.logger.info("前10个图片:")
            for i, img in enumerate(images[:10], 1):
                src = img.get('src', '')
                alt = img.get('alt', '')[:30]
                self.logger.info(f"  {i}. {alt} -> {src}")

            # Collect links whose URL or anchor text looks album-related.
            url_keywords = ('album', 'gallery', 'photo', 'image', 'pic')
            text_keywords = ('相册', '图片', '照片', '画廊')
            album_links = []
            for link in links:
                href = link.get('href', '')
                text = link.get_text(strip=True).lower()

                if any(keyword in href.lower() for keyword in url_keywords):
                    album_links.append(urljoin(self.base_url, href))
                elif any(keyword in text for keyword in text_keywords):
                    album_links.append(urljoin(self.base_url, href))

            # De-duplicate while preserving first-seen order; list(set(...))
            # would return links in a nondeterministic order.
            album_links = list(dict.fromkeys(album_links))
            self.logger.info(f"提取到 {len(album_links)} 个可能的相册链接")

            for i, link in enumerate(album_links[:5], 1):
                self.logger.info(f"  {i}. {link}")

            return album_links, images

        except Exception as e:
            self.logger.error(f"解析页面失败: {str(e)}")
            return [], []

    def download_image(self, img_url, filename):
        """Download one image into ``self.download_dir``.

        Args:
            img_url: Absolute image URL.
            filename: Target file name (no directory component).

        Returns:
            True on success, False on failure; updates ``self.stats``
            either way.
        """
        try:
            response = self.session.get(img_url, timeout=30, verify=False)
            response.raise_for_status()

            # Ensure the download directory exists.
            os.makedirs(self.download_dir, exist_ok=True)

            filepath = os.path.join(self.download_dir, filename)
            with open(filepath, 'wb') as f:
                f.write(response.content)

            # BUG FIX: previously logged a literal placeholder "(unknown)"
            # instead of the saved file's name.
            self.logger.info(f"下载成功: {filename}")
            self.stats['downloaded'] += 1
            return True

        except Exception as e:
            self.logger.error(f"下载失败 {img_url}: {str(e)}")
            self.stats['errors'] += 1
            return False

    def run(self):
        """Fetch the base page, parse it, and download every image found."""
        try:
            self.logger.info(f"开始处理: {self.base_url}")

            content = self.get_page_content(self.base_url)
            if not content:
                self.logger.error("无法获取页面内容")
                return

            # album_links is currently unused beyond logging inside
            # parse_page; kept for future recursive crawling.
            album_links, images = self.parse_page(content)

            for i, img in enumerate(images, 1):
                src = img.get('src', '')
                if src:
                    # Resolve relative image URLs against the page URL.
                    img_url = urljoin(self.base_url, src)

                    # Derive a deterministic local name from the URL path.
                    # Compare extensions case-insensitively so "X.JPG" is
                    # not renamed to "X.JPG.jpg".
                    filename = f"image_{i:03d}_{os.path.basename(urlparse(img_url).path)}"
                    if not filename.lower().endswith(('.jpg', '.jpeg', '.png', '.gif', '.webp')):
                        filename += '.jpg'

                    self.stats['found'] += 1
                    self.download_image(img_url, filename)

            self.logger.info("=== 下载完成 ===")
            self.logger.info(f"发现图片: {self.stats['found']} 个")
            self.logger.info(f"下载成功: {self.stats['downloaded']} 个")
            self.logger.info(f"下载失败: {self.stats['errors']} 个")

        except Exception as e:
            self.logger.error(f"运行失败: {str(e)}")
            import traceback
            self.logger.error(traceback.format_exc())

def main():
    """Entry point: build a downloader with default settings and run it."""
    AlbumDownloader().run()

if __name__ == "__main__":
    main()