#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
增强版相册网站图片下载器
支持代理、更好的反检测机制
作者: AI Assistant
创建时间: 2025
"""

import os
import sys
import requests
from bs4 import BeautifulSoup
import urllib.parse
import time
import logging
from datetime import datetime
import re
from pathlib import Path
from typing import List, Dict, Set, Optional
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from tqdm import tqdm
import urllib3
import random
import json

# 禁用SSL警告
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

class EnhancedAlbumDownloader:
    """Enhanced album downloader.

    Crawls an album-style website, extracts candidate album links, and
    prepares image downloads, using basic anti-detection measures:
    rotating User-Agents, random delays, retry/backoff, and fallback
    request headers.
    """

    def __init__(self, base_url: str, download_dir: str = "downloads", use_proxy: bool = False):
        """
        Initialize the downloader.

        Args:
            base_url: Base URL of the album website to crawl.
            download_dir: Directory images are saved under (created if missing).
            use_proxy: Whether requests should go through a proxy.
        """
        self.base_url = base_url
        self.download_dir = Path(download_dir)
        # BUGFIX: parents=True is required — callers pass nested paths such as
        # "downloads/jjcos_albums_enhanced", and mkdir(exist_ok=True) alone
        # raises FileNotFoundError when the parent directory does not exist.
        self.download_dir.mkdir(parents=True, exist_ok=True)
        self.use_proxy = use_proxy

        # Logging must be configured before anything else tries to log.
        self.setup_logger()

        # HTTP session with retry/backoff and browser-like headers.
        self.session = self.create_session()

        # URLs already downloaded, used to skip duplicates.
        self.downloaded_urls: Set[str] = set()

        # Run statistics. NOTE(review): 'albums_found' is never incremented
        # anywhere in this file — confirm whether a caller updates it.
        self.stats = {
            'albums_found': 0,
            'images_downloaded': 0,
            'images_skipped': 0,
            'errors': 0
        }

        # Pool of desktop User-Agent strings rotated on every page request.
        self.user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/121.0',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
        ]

    def setup_logger(self) -> None:
        """Configure a dedicated logger writing to both a timestamped file and the console."""
        log_dir = Path("logs")
        log_dir.mkdir(parents=True, exist_ok=True)

        log_filename = log_dir / f'enhanced_album_downloader_{datetime.now().strftime("%Y%m%d_%H%M%S")}.log'

        formatter = logging.Formatter(
            '%(asctime)s [%(levelname)s] %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S'
        )

        # File handler (UTF-8 so non-ASCII log messages are preserved).
        file_handler = logging.FileHandler(log_filename, encoding='utf-8')
        file_handler.setFormatter(formatter)

        # Console handler mirrors everything to stderr.
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)

        # Named logger; clear handlers first so repeated construction of this
        # class does not duplicate log lines.
        self.logger = logging.getLogger('EnhancedAlbumDownloader')
        self.logger.setLevel(logging.INFO)
        self.logger.handlers.clear()
        self.logger.addHandler(file_handler)
        self.logger.addHandler(console_handler)

    def get_random_user_agent(self) -> str:
        """Return a random User-Agent string from the configured pool."""
        return random.choice(self.user_agents)

    def create_session(self) -> requests.Session:
        """Create a requests session with retry/backoff and browser-like default headers."""
        session = requests.Session()

        # Retry transient server/throttling errors with exponential backoff.
        retry_strategy = Retry(
            total=5,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["HEAD", "GET", "OPTIONS"],
            backoff_factor=2
        )

        adapter = HTTPAdapter(max_retries=retry_strategy)
        session.mount("http://", adapter)
        session.mount("https://", adapter)

        # Default headers mimicking a regular desktop browser navigation.
        session.headers.update({
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Cache-Control': 'max-age=0',
            'DNT': '1'
        })

        return session

    def clean_filename(self, filename: str, is_folder: bool = False) -> str:
        """Sanitize a file/folder name: strip illegal characters, collapse
        whitespace to underscores, cap length at 100 characters, and fall back
        to a default name if nothing remains.

        Args:
            filename: Raw name extracted from the page.
            is_folder: Folders get a stricter character blacklist.

        Returns:
            A filesystem-safe, non-empty name.
        """
        # Characters not allowed on common filesystems (plus control chars).
        invalid_chars = '<>:"/\\|?*\n\r\t'
        if is_folder:
            # Folders additionally strip brackets/punctuation for tidier paths.
            invalid_chars += '[]()（）《》【】『』「」［］〈〉{}！!@#$%^&*=+'

        # Single C-level pass instead of one .replace() per character.
        filename = filename.translate(str.maketrans(dict.fromkeys(invalid_chars, '_')))

        # Collapse runs of whitespace and limit length.
        filename = re.sub(r'\s+', '_', filename.strip())
        if len(filename) > 100:
            filename = filename[:100]

        # Guarantee a non-empty result.
        if not filename:
            filename = 'untitled_folder' if is_folder else 'untitled'

        return filename

    def get_page_content(self, url: str, retries: int = 3) -> Optional[BeautifulSoup]:
        """Fetch a page and parse it into BeautifulSoup.

        Each attempt uses a progressively more "disguised" request (plain,
        then with search-engine Referer/Origin, then with a spoofed
        X-Forwarded-For), with random delays between attempts.

        Args:
            url: Page URL to fetch.
            retries: Maximum number of attempts.

        Returns:
            Parsed soup, or None if all attempts failed (increments
            stats['errors'] in that case).
        """
        for attempt in range(retries):
            try:
                self.logger.info(f"正在获取页面 (尝试 {attempt + 1}/{retries}): {url}")

                # Random delay to look less like a bot.
                time.sleep(random.uniform(2, 5))

                # Rotate the User-Agent on every attempt.
                self.session.headers.update({
                    'User-Agent': self.get_random_user_agent()
                })

                # Escalate the disguise on each retry.
                if attempt == 0:
                    # First attempt: plain request.
                    response = self.session.get(url, timeout=30, verify=False)
                elif attempt == 1:
                    # Second attempt: pretend we navigated from Google.
                    headers = {
                        'Referer': 'https://www.google.com/',
                        'Origin': 'https://www.google.com'
                    }
                    response = self.session.get(url, timeout=30, verify=False, headers=headers)
                else:
                    # Final attempt: simulate a search-engine click with a
                    # randomized X-Forwarded-For.
                    headers = {
                        'Referer': f'https://www.google.com/search?q={urllib.parse.quote(url)}',
                        'X-Forwarded-For': f'{random.randint(1,255)}.{random.randint(1,255)}.{random.randint(1,255)}.{random.randint(1,255)}'
                    }
                    response = self.session.get(url, timeout=30, verify=False, headers=headers)

                response.raise_for_status()

                # A near-empty body usually means we were blocked; retry.
                if len(response.text) < 100:
                    self.logger.warning(f"页面内容过短，可能被阻止: {len(response.text)} 字符")
                    if attempt < retries - 1:
                        continue

                # requests defaults to ISO-8859-1 when the server omits a
                # charset; fall back to the detected encoding instead.
                if response.encoding == 'ISO-8859-1':
                    response.encoding = response.apparent_encoding

                soup = BeautifulSoup(response.text, 'html.parser')
                self.logger.info(f"成功获取页面，内容长度: {len(response.text)} 字符")
                return soup

            except Exception as e:
                self.logger.error(f"获取页面失败 (尝试 {attempt + 1}/{retries}) {url}: {str(e)}")
                if attempt < retries - 1:
                    # Back off longer after a failure.
                    time.sleep(random.uniform(5, 10))
                else:
                    self.stats['errors'] += 1

        return None

    def test_connection(self) -> bool:
        """Probe a few well-known sites to verify outbound connectivity.

        Returns:
            True as soon as any probe returns HTTP 200, False if all fail.
        """
        test_urls = [
            'https://www.baidu.com',
            'https://www.google.com',
            'https://httpbin.org/get'
        ]

        for url in test_urls:
            try:
                self.logger.info(f"测试连接: {url}")
                response = self.session.get(url, timeout=10, verify=False)
                if response.status_code == 200:
                    self.logger.info(f"连接测试成功: {url}")
                    return True
            except Exception as e:
                self.logger.warning(f"连接测试失败 {url}: {str(e)}")

        self.logger.error("所有连接测试都失败了")
        return False

    def extract_album_links(self, soup: BeautifulSoup, base_url: str) -> List[Dict[str, str]]:
        """Extract likely album links from a parsed page.

        Applies a broad set of CSS selectors, resolves relative URLs against
        base_url, skips obvious non-album pages (login/contact/etc.), and
        derives a cleaned title from the link's title attribute, its image's
        alt text, or its visible text.

        Args:
            soup: Parsed page.
            base_url: URL used to resolve relative hrefs.

        Returns:
            De-duplicated list of {'url': ..., 'title': ...} dicts.
        """
        albums = []

        # Broad selectors: anything that looks like an album/gallery entry.
        album_selectors = [
            'a[href*="album"]',
            'a[href*="gallery"]',
            'a[href*="set"]',
            'a[href*="photo"]',
            'a[href*="pic"]',
            'a[href*="image"]',
            '.album-item a',
            '.gallery-item a',
            '.thumbnail a',
            '.photo-item a',
            '.item a',
            'a img',  # links wrapping an image
            'div[class*="item"] a',
            'div[class*="thumb"] a',
            'div[class*="photo"] a'
        ]

        for selector in album_selectors:
            try:
                links = soup.select(selector)
                for link in links:
                    href = link.get('href')
                    if href and href != '#' and not href.startswith('javascript:'):
                        # Resolve to an absolute URL.
                        full_url = urllib.parse.urljoin(base_url, href)

                        # Skip links that are clearly not albums.
                        if any(skip in full_url.lower() for skip in ['login', 'register', 'contact', 'about', 'help']):
                            continue

                        # Derive a title: title attr > img alt > link text > fallback.
                        img = link.find('img')
                        title = ''
                        if link.get('title'):
                            title = link['title']
                        elif img and img.get('alt'):
                            title = img['alt']
                        elif link.text.strip():
                            title = link.text.strip()
                        else:
                            title = f"album_{len(albums) + 1}"

                        albums.append({
                            'url': full_url,
                            'title': self.clean_filename(title, is_folder=True)
                        })
            except Exception as e:
                self.logger.warning(f"选择器 {selector} 解析失败: {str(e)}")

        # De-duplicate by URL while preserving discovery order.
        seen_urls = set()
        unique_albums = []
        for album in albums:
            if album['url'] not in seen_urls:
                seen_urls.add(album['url'])
                unique_albums.append(album)

        return unique_albums

    def simple_download_test(self) -> None:
        """Run a diagnostic pass: verify connectivity, fetch the base page,
        and log its structure (title, link/image counts, candidate albums).
        Downloads nothing; purely informational."""
        self.logger.info("开始简单下载测试...")

        # Bail out early if we cannot reach the internet at all.
        if not self.test_connection():
            self.logger.error("网络连接测试失败，请检查网络设置")
            return

        # Fetch the target page.
        soup = self.get_page_content(self.base_url)
        if not soup:
            self.logger.error("无法获取目标页面")
            return

        # Analyze the page structure.
        self.logger.info("分析页面结构...")

        # Page title, if present.
        title = soup.find('title')
        if title:
            self.logger.info(f"页面标题: {title.text.strip()}")

        # Count links and images.
        links = soup.find_all('a', href=True)
        images = soup.find_all('img')

        self.logger.info(f"找到 {len(links)} 个链接")
        self.logger.info(f"找到 {len(images)} 个图片")

        # Log a sample of the first links.
        self.logger.info("前10个链接:")
        for i, link in enumerate(links[:10], 1):
            href = link['href']
            text = link.get_text(strip=True)[:50]
            self.logger.info(f"  {i}. {text} -> {href}")

        # Try extracting album links and log a sample.
        albums = self.extract_album_links(soup, self.base_url)
        self.logger.info(f"提取到 {len(albums)} 个可能的相册链接")

        for i, album in enumerate(albums[:5], 1):
            self.logger.info(f"  相册 {i}: {album['title']} -> {album['url']}")

        self.logger.info("简单测试完成")

def main():
    """Entry point: build the downloader and run its diagnostic crawl."""
    # Target site to analyze.
    target_url = "https://jjcos.com/tag/EbKijoqVm671/"

    # Downloader saves under a site-specific subdirectory.
    app = EnhancedAlbumDownloader(target_url, "downloads/jjcos_albums_enhanced")

    try:
        app.simple_download_test()
    except KeyboardInterrupt:
        app.logger.info("用户中断测试")
    except Exception as exc:
        app.logger.error(f"测试过程中发生错误: {str(exc)}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    main()