#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
图片下载器模块
提供从微信文章URL中提取和下载图片的功能
"""

import os
import re
import time
import logging
import requests
import traceback
import pymysql
from pathlib import Path
from datetime import datetime
from playwright.sync_api import sync_playwright, TimeoutError as PlaywrightTimeoutError
from urllib.parse import urlparse, urljoin
import random

# 导入配置
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config import DB_CONFIG, VIDEO_CONFIG, BROWSER_CONFIG

# 配置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class ImageDownloader:
    """Download every image referenced by a WeChat (mp.weixin.qq.com) article.

    The article page is rendered with Playwright (Firefox) and candidate
    image URLs are harvested three ways: ``<img>`` tag attributes, a regex
    scan of the raw HTML, and inline CSS background images.  Each image is
    then fetched with ``requests`` into
    ``<download_dir>/<article title>/images/`` and the outcome is recorded
    in the ``image_download_log`` MySQL table.
    """

    def __init__(self):
        """Initialize the downloader and ensure the download root exists."""
        # Images share the same download root as videos.
        self.download_dir = VIDEO_CONFIG['download_dir']
        self.max_retry = 3   # HTTP attempts per image
        self.timeout = 30    # per-request timeout, seconds

        # download_dir is a pathlib.Path; create it up front so per-article
        # subdirectories can be nested under it later.
        self.download_dir.mkdir(parents=True, exist_ok=True)

        logger.info(f"图片下载器初始化完成，下载目录: {self.download_dir}")

    def download_images_from_article(self, article_url, article_title=None):
        """
        Download all images found in the article at *article_url*.

        Args:
            article_url (str): URL of the article page.
            article_title (str, optional): article title; derived from the
                URL when omitted.

        Returns:
            dict: summary with keys ``success``, ``article_url``,
            ``article_title``, ``image_urls``, ``downloaded_images``,
            ``failed_images`` and ``error``.
        """
        logger.info(f"开始处理文章图片: {article_url}")

        result = {
            'success': True,
            'article_url': article_url,
            'article_title': article_title,
            'image_urls': [],
            'downloaded_images': [],
            'failed_images': [],
            'error': None
        }

        try:
            # Derive a title from the URL when the caller did not supply one.
            if not article_title:
                article_title = self._extract_title_from_url(article_url)
                result['article_title'] = article_title

            # Collect candidate image URLs from the rendered page.
            image_urls = self._extract_image_urls(article_url, article_title)
            result['image_urls'] = image_urls

            if not image_urls:
                logger.warning("未找到任何图片URL")
                return result

            logger.info(f"找到 {len(image_urls)} 张图片，开始下载...")

            # Download images one by one; a single failure must not abort
            # the rest of the batch.
            for i, image_url in enumerate(image_urls):
                try:
                    download_result = self._download_single_image(
                        image_url, article_title, i
                    )

                    if download_result['success']:
                        result['downloaded_images'].append(download_result)
                        logger.info(f"图片 {i+1}/{len(image_urls)} 下载成功")
                    else:
                        result['failed_images'].append({
                            'url': image_url,
                            'error': download_result['error']
                        })
                        logger.error(f"图片 {i+1}/{len(image_urls)} 下载失败: {download_result['error']}")

                    # Random pause between requests to avoid hammering the CDN.
                    time.sleep(random.uniform(0.5, 1.5))

                except Exception as e:
                    error_msg = f"下载图片时发生错误: {str(e)}"
                    logger.error(error_msg)
                    result['failed_images'].append({
                        'url': image_url,
                        'error': error_msg
                    })

            # Emit an end-of-run summary to the log.
            self._log_download_summary(result)

        except Exception as e:
            result['success'] = False
            result['error'] = str(e)
            logger.error(f"处理文章时发生错误: {e}")
            logger.error(traceback.format_exc())

        return result

    def _extract_image_urls(self, article_url, article_title):
        """Render the article with Playwright and return its image URLs.

        Three strategies are combined: <img> tag attributes, a regex scan
        of the page source, and inline-style background images.  The result
        is de-duplicated (first occurrence wins) and filtered through
        :meth:`_is_valid_image_url`.  Returns [] on any page-level failure.
        """
        image_urls = []

        try:
            logger.info("启动Playwright浏览器...")
            with sync_playwright() as p:
                # Launch the browser.
                browser = p.firefox.launch(
                    headless=BROWSER_CONFIG.get('headless', True),
                    slow_mo=BROWSER_CONFIG.get('slow_mo', 100)
                )
                logger.info("浏览器启动成功")

                page = browser.new_page()
                page.set_default_timeout(15000)  # cap all page operations at 15s

                # Pretend to be a desktop Chrome browser.
                page.set_extra_http_headers({
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
                })

                logger.info(f"正在访问文章页面: {article_url}")
                try:
                    # 'domcontentloaded' is enough here; waiting for the full
                    # 'load' event tends to time out on image-heavy pages.
                    page.goto(article_url, wait_until='domcontentloaded', timeout=15000)
                    logger.info("页面加载完成，等待内容渲染...")

                    # Give lazy-loaded content a moment to render.
                    time.sleep(2)

                except Exception as e:
                    logger.error(f"页面访问失败: {e}")
                    browser.close()
                    return []

                # Strategy 1: <img> tags, including common lazy-load attributes.
                try:
                    logger.info("方法1: 查找img标签...")
                    img_elements = page.query_selector_all('img')
                    logger.info(f"找到 {len(img_elements)} 个img标签")

                    for img in img_elements:
                        # First attribute that holds an absolute URL wins.
                        for attr in ['src', 'data-src', 'data-original', 'data-lazy']:
                            src = img.get_attribute(attr)
                            if src and src.startswith('http'):
                                image_urls.append(src)
                                logger.info(f"找到img标签图片 ({attr}): {src[:50]}...")
                                break
                except Exception as e:
                    logger.warning(f"提取img标签失败: {e}")

                # Strategy 2: regex scan of the raw page source.
                try:
                    logger.info("方法2: 正则表达式提取...")
                    page_content = page.content()
                    logger.info(f"页面内容长度: {len(page_content)} 字符")

                    # NOTE: inner alternations use non-capturing (?:...) so
                    # re.findall() yields the full URL.  With a capturing
                    # group, findall() returns only the innermost group
                    # ('jpg', 'png', ...), which then fails URL validation.
                    image_patterns = [
                        r'https://[^"\']*\.jpg[^"\']*',
                        r'https://[^"\']*\.jpeg[^"\']*',
                        r'https://[^"\']*\.png[^"\']*',
                        r'https://[^"\']*\.gif[^"\']*',
                        r'https://[^"\']*\.webp[^"\']*',
                        r'data-src="([^"]*\.(?:jpg|jpeg|png|gif|webp)[^"]*)"',
                        r'src="([^"]*\.(?:jpg|jpeg|png|gif|webp)[^"]*)"',
                        r'https://mmbiz\.qpic\.cn/[^"\']*\.(?:jpg|jpeg|png|gif|webp)',
                    ]

                    for pattern in image_patterns:
                        matches = re.findall(pattern, page_content, re.IGNORECASE)
                        logger.info(f"模式 {pattern} 找到 {len(matches)} 个匹配")
                        for match in matches:
                            # Defensive: findall() returns tuples if a
                            # pattern ever grows multiple groups.
                            image_url = match if isinstance(match, str) else match[0]
                            if self._is_valid_image_url(image_url):
                                image_urls.append(image_url)
                                logger.info(f"通过正则表达式找到图片: {image_url[:50]}...")

                except Exception as e:
                    logger.warning(f"正则表达式提取图片失败: {e}")

                # Strategy 3: CSS background images in inline styles.
                try:
                    logger.info("方法3: 查找CSS背景图片...")
                    elements_with_bg = page.query_selector_all('[style*="background"]')
                    logger.info(f"找到 {len(elements_with_bg)} 个带背景的元素")

                    for element in elements_with_bg:
                        style = element.get_attribute('style')
                        if style:
                            bg_matches = re.findall(r'background[^:]*:\s*url\(["\']?([^"\']+)["\']?\)', style)
                            for bg_url in bg_matches:
                                if self._is_valid_image_url(bg_url):
                                    image_urls.append(bg_url)
                                    logger.info(f"找到背景图片: {bg_url[:50]}...")
                except Exception as e:
                    logger.warning(f"提取背景图片失败: {e}")

                logger.info("关闭浏览器...")
                browser.close()

        except Exception as e:
            logger.error(f"提取图片URL时发生错误: {e}")
            logger.error(traceback.format_exc())

        # De-duplicate while preserving first-seen order (list(set(...))
        # would shuffle the images), then validate once more.
        unique_image_urls = list(dict.fromkeys(image_urls))
        valid_image_urls = [url for url in unique_image_urls if self._is_valid_image_url(url)]

        logger.info(f"总共提取到 {len(valid_image_urls)} 个有效图片URL")
        return valid_image_urls

    def _is_valid_image_url(self, url):
        """Return True when *url* looks like a downloadable content image.

        A URL qualifies if it has a known image extension or is hosted on a
        WeChat image CDN domain; common non-content images (avatars, logos,
        icons, QR codes, placeholders) are rejected unless WeChat-hosted.
        """
        if not url or len(url) < 10:
            return False

        # Must be an absolute (or protocol-relative) HTTP(S) URL.
        if not (url.startswith('http://') or url.startswith('https://') or url.startswith('//')):
            return False

        # Known image file extensions.
        valid_extensions = ['.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp']
        url_lower = url.lower()

        # Extension may appear anywhere (WeChat appends query params).
        has_extension = any(ext in url_lower for ext in valid_extensions)

        # WeChat image CDN domains are trusted even without an extension.
        is_weixin_image = 'mmbiz.qpic.cn' in url or 'mmbiz.qlogo.cn' in url

        # Filter out decorative / non-content images.
        exclude_patterns = [
            'avatar.jpg',  # avatars
            'logo.png',    # logos
            'icon.',       # icons
            'qrcode',      # QR codes
            'placeholder'  # placeholder images
        ]

        for pattern in exclude_patterns:
            if pattern in url_lower and not is_weixin_image:
                return False

        return has_extension or is_weixin_image

    def _download_single_image(self, image_url, article_title, index=0):
        """Download one image into ``<download_dir>/<title>/images/``.

        Args:
            image_url (str): direct URL of the image.
            article_title (str): used to name the per-article directory.
            index (int): zero-based position; used for the file name.

        Returns:
            dict: keys ``success``, ``image_url``, ``local_path``,
            ``file_size`` and ``error``.
        """
        result = {
            'success': False,
            'image_url': image_url,
            'local_path': None,
            'file_size': 0,
            'error': None
        }

        try:
            # Build <download_dir>/<sanitized title>/images/image_NNN.<ext>.
            safe_title = self._sanitize_filename(article_title)
            images_dir = self.download_dir / safe_title / 'images'
            images_dir.mkdir(parents=True, exist_ok=True)

            file_extension = self._get_image_extension(image_url)
            filename = f"image_{index+1:03d}{file_extension}"
            file_path = images_dir / filename

            # Skip re-downloading files that already look complete (>1 KiB).
            if file_path.exists() and file_path.stat().st_size > 1024:
                logger.info(f"图片文件已存在，跳过下载: {file_path.name}")
                result['success'] = True
                result['local_path'] = str(file_path)
                result['file_size'] = file_path.stat().st_size
                return result

            logger.info(f"开始下载图片: {image_url[:50]}...")

            # WeChat's CDN rejects requests without a browser-like UA/Referer.
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Referer': 'https://mp.weixin.qq.com/',
                'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            }

            # Download with retries.
            for attempt in range(self.max_retry):
                try:
                    # Context manager releases the pooled connection even
                    # when streaming fails part-way through.
                    with requests.get(
                        image_url,
                        headers=headers,
                        stream=True,
                        timeout=self.timeout,
                        allow_redirects=True
                    ) as response:
                        response.raise_for_status()

                        # Warn (but proceed) when the server does not claim
                        # an image content type.
                        content_type = response.headers.get('content-type', '').lower()
                        if not any(img_type in content_type for img_type in ['image/', 'jpeg', 'png', 'gif', 'webp']):
                            logger.warning(f"响应内容类型不是图片: {content_type}")

                        # Stream the body to disk in 8 KiB chunks.
                        with open(file_path, 'wb') as f:
                            for chunk in response.iter_content(chunk_size=8192):
                                if chunk:
                                    f.write(chunk)

                    # Validate the downloaded file.
                    if file_path.exists() and file_path.stat().st_size > 0:
                        file_size = file_path.stat().st_size
                        logger.info(f"图片下载成功: {file_path.name} (大小: {file_size} bytes)")

                        result['success'] = True
                        result['local_path'] = str(file_path)
                        result['file_size'] = file_size

                        # Record the success in the database.
                        self._log_image_download(article_title, image_url, str(file_path), file_size, True)

                        return result
                    else:
                        raise Exception("下载的文件大小为0")

                except Exception as e:
                    logger.warning(f"下载尝试 {attempt + 1}/{self.max_retry} 失败: {e}")
                    if attempt < self.max_retry - 1:
                        time.sleep(2)  # back off before retrying
                        continue
                    else:
                        raise

        except Exception as e:
            error_msg = f"下载图片失败: {str(e)}"
            result['error'] = error_msg
            logger.error(error_msg)

            # Record the failure in the database.
            self._log_image_download(article_title, image_url, None, 0, False, error_msg)

        return result

    def _get_image_extension(self, url):
        """Return the image file extension of *url* (default ``'.jpg'``)."""
        try:
            url_path = url.split('?')[0]  # strip query parameters
            if '.' in url_path:
                ext = '.' + url_path.split('.')[-1].lower()
                valid_extensions = ['.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp']
                if ext in valid_extensions:
                    return ext
            return '.jpg'  # default extension
        except Exception:
            return '.jpg'

    def _sanitize_filename(self, filename):
        """Return *filename* with filesystem-unsafe characters replaced.

        Falls back to ``"unnamed_article"`` for empty input and truncates
        the result to 100 characters.
        """
        if not filename:
            return "unnamed_article"

        # Replace characters that are invalid in file names.
        unsafe_chars = ['<', '>', ':', '"', '/', '\\', '|', '?', '*', '\n', '\r', '\t']
        safe_filename = filename
        for char in unsafe_chars:
            safe_filename = safe_filename.replace(char, '_')

        # Keep the directory name reasonably short.
        if len(safe_filename) > 100:
            safe_filename = safe_filename[:100]

        return safe_filename.strip()

    def _extract_title_from_url(self, url):
        """Derive a fallback article title from *url*.

        WeChat article URLs look like ``.../s/<article-id>``; the first ten
        characters of the id become the title.  Otherwise (or on any parse
        error) a timestamp-based name is generated.
        """
        try:
            parsed_url = urlparse(url)
            if 's/' in parsed_url.path:
                # WeChat article URLs embed the article id after '/s/'.
                article_id = parsed_url.path.split('s/')[-1]
                return f"article_{article_id[:10]}"
            else:
                return f"article_{int(time.time())}"
        except Exception:
            return f"article_{int(time.time())}"

    def _log_image_download(self, article_title, image_url, local_path, file_size, success, error_message=None):
        """Insert one row into the ``image_download_log`` MySQL table.

        Best-effort: database errors are logged and swallowed so a broken
        log table can never break the download itself.
        """
        try:
            conn = pymysql.connect(**DB_CONFIG)
            try:
                with conn.cursor() as cursor:
                    sql = """
                        INSERT INTO image_download_log 
                        (article_title, image_url, local_path, file_size, download_status, error_message, download_time)
                        VALUES (%s, %s, %s, %s, %s, %s, %s)
                    """
                    status = 1 if success else 2  # 1 = success, 2 = failure
                    download_time = datetime.now() if success else None

                    cursor.execute(sql, (
                        article_title, image_url, local_path, file_size, 
                        status, error_message, download_time
                    ))
                conn.commit()
            finally:
                # Always release the connection, even when execute/commit
                # raises (previously it leaked on error).
                conn.close()

        except Exception as e:
            logger.error(f"记录图片下载日志失败: {e}")

    def _log_download_summary(self, result):
        """Log a human-readable summary of one article's download run."""
        total_images = len(result['image_urls'])
        successful_downloads = len(result['downloaded_images'])
        failed_downloads = len(result['failed_images'])

        logger.info("="*50)
        logger.info("图片下载摘要:")
        logger.info(f"文章标题: {result['article_title']}")
        logger.info(f"文章URL: {result['article_url']}")
        logger.info(f"发现图片数量: {total_images}")
        logger.info(f"成功下载: {successful_downloads}")
        logger.info(f"下载失败: {failed_downloads}")

        if result['downloaded_images']:
            logger.info("成功下载的图片:")
            for i, image in enumerate(result['downloaded_images'], 1):
                logger.info(f"  {i}. {Path(image['local_path']).name} ({image['file_size']} bytes)")

        if result['failed_images']:
            logger.info("下载失败的图片:")
            for i, image in enumerate(result['failed_images'], 1):
                logger.info(f"  {i}. {image['url'][:50]}... (错误: {image['error']})")

        logger.info("="*50)


# def main():
#     """主函数 - 提供命令行接口"""
#     import argparse
    
#     parser = argparse.ArgumentParser(description='微信文章图片下载器')
#     parser.add_argument('url', help='文章URL')
#     parser.add_argument('--title', help='文章标题（可选）')
    
#     args = parser.parse_args()
    
#     # 创建下载器并执行下载
#     downloader = ImageDownloader()
#     result = downloader.download_images_from_article(args.url, args.title)
    
#     if result['success']:
#         print(f"✅ 图片下载完成！成功下载 {len(result['downloaded_images'])} 张图片")
#     else:
#         print(f"❌ 图片下载失败: {result['error']}")


if __name__ == "__main__":
    # main() 
    # Ad-hoc manual run: download the images of one hard-coded article.
    article_link = "https://mp.weixin.qq.com/s/UkRQd4dVy7lmKyek_iwu8A"
    article_name = "测试文章"
    result = ImageDownloader().download_images_from_article(article_link, article_name)
    