# Standard library
import base64
import hashlib
import json
import logging
import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from typing import List
from urllib.parse import urlparse

# Third-party
import redis
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager

# Module-wide shared state
driver = None  # Selenium WebDriver; created lazily by init_selenium_driver()
redis_client = redis.StrictRedis(host='localhost', port=6379, db=0)

# Paths to the project root and the sibling downloads/logs directories
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
DOWNLOADS_DIR = os.path.join(PROJECT_ROOT, '..', 'downloads')
LOGS_DIR = os.path.join(PROJECT_ROOT, '..', 'logs')

# Make sure the required directories exist before anything tries to write
os.makedirs(DOWNLOADS_DIR, exist_ok=True)
os.makedirs(LOGS_DIR, exist_ok=True)  # ensure the log directory exists

# Logging configuration
def setup_logger():
    """Build and return the 'downloader' logger writing to file and console."""
    log_path = os.path.join(
        LOGS_DIR, f'downloader_{datetime.now().strftime("%Y%m%d")}.log')
    os.makedirs(os.path.dirname(log_path), exist_ok=True)

    fmt = logging.Formatter(
        '%(asctime)s [%(levelname)s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )

    log = logging.getLogger('downloader')
    log.setLevel(logging.INFO)
    # Clear handlers so repeated calls never double-log.
    log.handlers.clear()

    # Same formatter on both sinks; file handler first, console second.
    for handler in (logging.FileHandler(log_path, encoding='utf-8'),
                    logging.StreamHandler()):
        handler.setFormatter(fmt)
        log.addHandler(handler)

    return log

# Create the module-wide logger object
logger = setup_logger()

def init_selenium_driver():
    """Create the shared headless Chrome WebDriver and store it in `driver`."""
    global driver

    # All simple command-line switches, applied in one pass below:
    # headless operation, quiet logging, fixed viewport, desktop user-agent.
    chrome_flags = (
        '--headless',
        '--no-sandbox',
        '--disable-dev-shm-usage',
        '--disable-gpu',
        '--disable-software-rasterizer',
        '--disable-extensions',
        '--disable-logging',
        '--log-level=3',
        '--silent',
        '--window-size=1920,1080',
        '--disable-infobars',
        '--disable-notifications',
        '--disable-usb-keyboard-detect',
        '--disable-usb-devices-checks',
        '--disable-dev-tools',
        'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    )

    options = Options()
    for flag in chrome_flags:
        options.add_argument(flag)
    options.add_experimental_option('excludeSwitches', [
        'enable-logging',
        'enable-automation',
        'ignore-certificate-errors'
    ])
    # options.page_load_strategy = 'eager'  # eager page-load strategy (disabled)

    # webdriver-manager downloads a matching chromedriver binary on demand.
    driver = webdriver.Chrome(
        service=Service(ChromeDriverManager().install()),
        options=options
    )

    logger.info('Selenium 驱动已初始化')

def create_download_directory(domain: str, title: str) -> str:
    """Ensure <DOWNLOADS_DIR>/<domain>/<title> exists and return its path."""
    target = os.path.join(DOWNLOADS_DIR, domain, title)
    os.makedirs(target, exist_ok=True)
    logger.info(f'创建下载目录: {target}')
    return target

def update_progress(task_id: str, total_images: int) -> None:
    """Recompute and store the task's percentage progress in Redis.

    Progress is int((completed + failed) / total_images * 100); hash fields
    that are not set yet count as 0.

    Args:
        task_id: Redis hash key holding the task counters.
        total_images: Total number of images in the task.
    """
    if total_images <= 0:
        # Guard against ZeroDivisionError: an empty task is vacuously done.
        redis_client.hset(task_id, 'progress', 100)
        return
    completed = int(redis_client.hget(task_id, 'completed') or 0)
    failed = int(redis_client.hget(task_id, 'failed') or 0)
    progress = int((completed + failed) / total_images * 100)
    redis_client.hset(task_id, 'progress', progress)

def download_image(url: str, download_path: str, index: int, task_id: str, total_images: int, retries: int = 3) -> None:
    """Download one image into download_path and update the task counters.

    Three cases are handled: file already on disk (skipped and counted as
    completed), data: URIs (base64-decoded), and http(s) URLs (fetched with
    retries and a per-request timeout).

    Args:
        url: Image source; an http(s) URL or a data:image URI.
        download_path: Directory the image is written into.
        index: Zero-based position; the file is named image_<index+1>.jpg.
        task_id: Redis hash key holding the task counters.
        total_images: Total images in the task, used for progress updates.
        retries: Number of HTTP attempts before recording a failure.
    """
    image_name = os.path.join(download_path, f'image_{index + 1}.jpg')
    if os.path.exists(image_name):
        # Already downloaded on a previous run: count it as completed.
        logger.info(f'图片已存在，跳过下载: {image_name}')
        redis_client.hincrby(task_id, 'completed', 1)
        update_progress(task_id, total_images)
        return

    if url.startswith('data:image'):
        # Inline base64 image: decode the payload after the first comma.
        try:
            header, encoded = url.split(',', 1)
            data = base64.b64decode(encoded)
            with open(image_name, 'wb') as f:
                f.write(data)
            logger.info(f'下载成功: {image_name}')
            redis_client.hincrby(task_id, 'completed', 1)
            update_progress(task_id, total_images)
        except Exception as e:
            logger.error(f'下载 base64 图片错误 {url}: {e}')
            redis_client.hincrby(task_id, 'failed', 1)
            redis_client.hset(task_id, 'status', 'failed')
            update_progress(task_id, total_images)
        return

    for attempt in range(retries):
        try:
            # requests has NO default timeout; without one an unresponsive
            # server would hang this worker thread forever.
            response = requests.get(url, timeout=15)
            if response.status_code == 200:
                with open(image_name, 'wb') as f:
                    f.write(response.content)
                logger.info(f'下载成功: {image_name}')
                redis_client.hincrby(task_id, 'completed', 1)
                update_progress(task_id, total_images)
                return
            else:
                logger.warning(
                    f'下载失败: {url} (状态码: {response.status_code})')
        except Exception as e:
            logger.error(f'下载错误 {url}: {e}')
        time.sleep(1)  # back off between attempts to avoid hammering the server
    logger.error(f'下载失败 {url} 在 {retries} 次尝试后')

    redis_client.hincrby(task_id, 'failed', 1)
    redis_client.hset(task_id, 'status', 'failed')
    update_progress(task_id, total_images)

def download_images(image_urls: List[str], download_path: str, task_id: str) -> None:
    """Download all images concurrently and finalize the task state in Redis.

    Args:
        image_urls: Image sources to fetch.
        download_path: Directory the images are written into.
        task_id: Redis hash key holding the task counters/status.
    """
    total_images = len(image_urls)
    # hmset() is deprecated since redis-py 3.5 and removed in 4.x;
    # hset(name, mapping=...) is the supported equivalent.
    redis_client.hset(
        task_id, mapping={'total': total_images, 'completed': 0, 'failed': 0, 'status': 'in_progress'})
    logger.info(f'开始下载 {total_images} 张图片')
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = [executor.submit(download_image, url, download_path, index, task_id, total_images)
                   for index, url in enumerate(image_urls)]
        for future in as_completed(futures):
            # Re-raise any unexpected exception from a worker thread.
            future.result()
    logger.info(f'完成图片下载任务 {task_id}')
    redis_client.hset(task_id, 'status', 'completed')
    redis_client.hset(task_id, 'progress', 100)

def fetch_image_urls(webpage_url: str) -> List[str]:
    """Load webpage_url in the shared driver and return all <img> src values."""
    global driver
    try:
        driver.get(webpage_url)
        # Explicit wait until at least one <img> element is present.
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.TAG_NAME, 'img'))
        )
        logger.info(f'从 {webpage_url} 获取图片 URL')

        image_urls = []
        for element in driver.find_elements(By.TAG_NAME, 'img'):
            src = element.get_attribute('src')
            if src:
                image_urls.append(src)

        logger.info(f'找到 {len(image_urls)} 个图片 URL')
        return image_urls
    except Exception as e:
        logger.error(f'加载页面 {webpage_url} 时出错: {e}')
        return []

def download_from_webpage(webpage_url: str, task_id: str) -> None:
    """Scrape a page for images, download them, and record the task in Redis.

    Args:
        webpage_url: Page to scrape for <img> tags.
        task_id: Redis hash key used to track progress for this task.
    """
    # Parse the URL properly so query strings and fragments (e.g. '?page=2',
    # '#top') never leak into filesystem directory names, as the previous
    # naive split() approach allowed.
    parsed = urlparse(webpage_url)
    domain = parsed.netloc or webpage_url.split("//")[-1].split("/")[0]
    last_segment = parsed.path.rstrip('/').split('/')[-1]
    title = last_segment if last_segment else "index"
    download_path = create_download_directory(domain, title)

    image_urls = fetch_image_urls(webpage_url)
    download_images(image_urls, download_path, task_id)
    redis_client.hset(task_id, 'progress', 100)
    logger.info(f'完成下载任务 {task_id}，网址: {webpage_url}')

    # Build a history record of the completed download.
    download_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    total_size = sum(os.path.getsize(os.path.join(download_path, f)) for f in os.listdir(download_path))
    download_record = {
        'task_id': task_id,
        'url': webpage_url,
        'domain': domain,
        'title': title,
        'total_images': len(image_urls),
        'total_size': total_size,
        'download_time': download_time
    }

    # Drop any previous record for the same task before re-inserting it.
    existing_records = redis_client.lrange('download_records', 0, -1)
    for record in existing_records:
        if json.loads(record).get('task_id') == task_id:
            redis_client.lrem('download_records', 0, record)
            break

    redis_client.lpush('download_records', json.dumps(download_record))
    redis_client.ltrim('download_records', 0, 9)  # keep only the 10 most recent records

def generate_task_id(url: str) -> str:
    """Derive a deterministic task id for *url*: its hex MD5 digest."""
    digest = hashlib.md5(url.encode('utf-8'))
    return digest.hexdigest()
