import os
from flask import Flask, request, jsonify, send_from_directory, send_file
import requests
from bs4 import BeautifulSoup
import urllib.parse
import time
import logging
from datetime import datetime
import concurrent.futures
import multiprocessing
from threading import Lock
import re
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager
import atexit
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

app = Flask(__name__, static_folder='static')

# Paths for the project root, the download target and the log directory
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
DOWNLOADS_DIR = os.path.join(PROJECT_ROOT, 'downloads')
LOGS_DIR = os.path.join(PROJECT_ROOT, 'logs')

# Make sure the required directories exist before anything logs or downloads
os.makedirs(DOWNLOADS_DIR, exist_ok=True)
os.makedirs(LOGS_DIR, exist_ok=True)  # ensure the log directory exists

# Global state
chrome_driver = None  # shared Selenium driver, lazily initialized by init_chrome_driver()
driver_lock = Lock()  # thread lock intended to guard driver access; NOTE(review): never acquired in this file — confirm whether driver calls need it

# Logging configuration
def setup_logger():
    """Configure the root logger with a dated file handler plus a console handler.

    Returns:
        The configured root logger (INFO level); existing handlers are removed
        first so repeated calls never duplicate output.
    """
    # One log file per day, e.g. logs/app_20240101.log
    log_path = os.path.join(LOGS_DIR, f'app_{datetime.now().strftime("%Y%m%d")}.log')

    fmt = logging.Formatter(
        '%(asctime)s [%(levelname)s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )

    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.handlers.clear()  # avoid duplicate handlers on re-invocation

    # File handler first, then console — both share the same format.
    for handler in (logging.FileHandler(log_path, encoding='utf-8'),
                    logging.StreamHandler()):
        handler.setFormatter(fmt)
        root.addHandler(handler)

    return root

# Create the shared module-level logger object
logger = setup_logger()

def get_chrome_options():
    """Build the Chrome Options for a quiet, headless scraping session.

    Returns:
        A configured selenium Options instance (headless, fixed window size,
        logging/notification noise suppressed, automation banner hidden).
    """
    flags = (
        '--headless',
        '--no-sandbox',
        '--disable-dev-shm-usage',
        'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        '--disable-gpu',
        '--disable-software-rasterizer',
        '--disable-extensions',
        '--disable-logging',
        '--disable-plugins',
        '--log-level=3',
        '--silent',
        '--window-size=1920,1080',
        '--disable-infobars',
        '--disable-notifications',
        '--disable-usb-keyboard-detect',
        '--disable-usb-devices-checks',
        '--disable-dev-tools',
    )

    opts = Options()
    for flag in flags:
        opts.add_argument(flag)

    # Suppress driver logging and the "controlled by automated software" banner.
    opts.add_experimental_option('excludeSwitches', [
        'enable-logging',
        'enable-automation',
        'ignore-certificate-errors'
    ])
    return opts


def init_chrome_driver(page_load_timeout=60):
    """Initialize the shared global Chrome driver.

    Args:
        page_load_timeout: seconds after which a page load is aborted.

    Returns:
        True when the driver is ready; False on any failure (in which case
        the global ``chrome_driver`` is reset to None).
    """
    global chrome_driver
    try:
        logger.info('正在初始化 Chrome 驱动...')
        options = get_chrome_options()

        # Environment switches that silence webdriver-manager's output.
        os.environ['WDM_LOG_LEVEL'] = '0'
        os.environ['WDM_PRINT_FIRST_LINE'] = 'False'

        # Download/locate a matching chromedriver and start the browser.
        driver = webdriver.Chrome(
            service=Service(ChromeDriverManager().install()),
            options=options
        )
        driver.set_page_load_timeout(page_load_timeout)
        chrome_driver = driver

        logger.info('Chrome 驱动初始化成功')
        return True

    except Exception as e:
        logger.error(f'Chrome 驱动初始化失败: {str(e)}')
        chrome_driver = None
        return False

@app.route('/')
def index():
    """Serve the single-page frontend from the project directory."""
    return send_from_directory('.', 'index.html')

@app.route('/favicon.ico')
def favicon():
    """Serve the site icon from the configured static folder."""
    return send_from_directory(app.static_folder, 'favicon.ico')

def clean_filename(filename, is_folder=False):
    """Sanitize a file or folder name: strip illegal characters and spaces.

    Args:
        filename: raw name (often a URL basename or a page title).
        is_folder: when True, a wider set of punctuation is also replaced
            and no extension handling is applied.

    Returns:
        A safe name, truncated to 100 characters. Files are guaranteed a
        known image extension (defaulting to '.jpg'); an empty result falls
        back to 'untitled_folder' / 'untitled.jpg'.
    """
    invalid_chars = '<>:"/\\|?*\n\r\t'
    if is_folder:
        invalid_chars += '[]()（）《》【】『』「」［］〈〉{}！!@#$%^&*=+'
    # Single C-level pass instead of one .replace() call per character:
    # every illegal character becomes '_', spaces are removed outright.
    mapping = {char: '_' for char in invalid_chars}
    mapping[' '] = None
    filename = filename.translate(str.maketrans(mapping))
    filename = filename.strip('. ')
    if len(filename) > 100:
        filename = filename[:100]
    if not is_folder:
        if '.' not in filename:
            filename += '.jpg'
        valid_extensions = ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp')
        # Unknown extensions keep the original text and gain a '.jpg' suffix.
        if os.path.splitext(filename)[1].lower() not in valid_extensions:
            filename += '.jpg'
    return filename or ('untitled_folder' if is_folder else 'untitled.jpg')

def format_size(bytes):
    """Format a byte count as a human-readable string (B/KB/MB/GB/TB)."""
    size = bytes
    for unit in ('B', 'KB', 'MB', 'GB'):
        if size < 1024:
            return f"{size:.2f} {unit}"
        size = size / 1024
    # Anything that survived four divisions is reported in terabytes.
    return f"{size:.2f} TB"

def fetch_with_soup(url, retries=3, backoff_factor=0.3):
    """Fetch *url* with requests and scrape candidate image URLs from its HTML.

    Args:
        url: page to fetch.
        retries: retry budget for connect/read/status failures.
        backoff_factor: urllib3 Retry backoff factor between attempts.

    Returns:
        (image_urls, page_title): a set of raw (possibly relative) image URLs
        gathered from <img> src/data-* attributes and <img>/<source> srcset,
        plus the page <title> text ('untitled' when absent).

    Raises:
        requests.exceptions.HTTPError: for non-2xx responses.
        Exception: any other fetch/parse failure is logged and re-raised.
    """
    session = requests.Session()
    retry = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=[500, 502, 503, 504, 567]
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)

    try:
        logger.info('使用 BeautifulSoup 获取页面内容...')
        response = session.get(url, headers={
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }, timeout=10)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')
        title_tag = soup.find('title')
        page_title = title_tag.text.strip() if title_tag else 'untitled'

        image_urls = set()

        def _collect_srcset(srcset):
            # srcset entries look like "img.jpg 1x"; keep only the URL part.
            # BUG FIX: the previous code rebound the *url* parameter here
            # (corrupting the HTTPError log below) and raised IndexError on
            # empty entries produced by trailing commas.
            for entry in srcset.split(','):
                parts = entry.strip().split()
                if parts:
                    image_urls.add(parts[0])

        for img in soup.find_all('img'):
            for attr in ['src', 'data-src', 'data-original', 'data-original-src']:
                if img.get(attr):
                    image_urls.add(img[attr])
            if img.get('srcset'):
                _collect_srcset(img['srcset'])
        for source in soup.find_all('source'):
            if source.get('srcset'):
                _collect_srcset(source['srcset'])

        return image_urls, page_title

    except requests.exceptions.HTTPError as e:
        logger.error(f'HTTP 错误: {e.response.status_code} {e.response.reason} for url: {url}')
        raise
    except Exception as e:
        logger.error(f'BeautifulSoup 解析失败: {str(e)}')
        raise
    finally:
        # BUG FIX: the session (and its pooled connections) was never closed.
        session.close()

def fetch_with_selenium(url):
    """Render *url* in headless Chrome and harvest image URLs via injected JS.

    Collects <img> src/data-src/srcset values plus CSS background-image URLs.
    Returns (image_urls, page_title); re-raises on any failure.
    """
    global chrome_driver

    try:
        logger.info('使用 Selenium 获取页面内容...')
        # Lazily (re)create the shared driver on first use.
        if chrome_driver is None and not init_chrome_driver():
            raise Exception('Chrome 驱动初始化失败')

        chrome_driver.get(url)
        time.sleep(3)  # crude wait so lazy-loaded content can appear

        page_title = chrome_driver.title

        harvested = chrome_driver.execute_script("""
            const images = new Set();
            document.querySelectorAll('img').forEach(img => {
                if (img.src) images.add(img.src);
                if (img.dataset.src) images.add(img.dataset.src);
                if (img.srcset) {
                    img.srcset.split(',').forEach(src => {
                        images.add(src.trim().split(' ')[0]);
                    });
                }
            });
            document.querySelectorAll('*').forEach(el => {
                const style = window.getComputedStyle(el);
                const bg = style.backgroundImage;
                if (bg && bg !== 'none') {
                    const match = bg.match(/url\(['"]?(.*?)['"]?\)/);
                    if (match) images.add(match[1]);
                }
            });
            return Array.from(images);
        """)

        return set(harvested), page_title

    except Exception as e:
        logger.error(f'Selenium 解析失败: {str(e)}')
        raise

@app.route('/fetch-images', methods=['POST'])
def fetch_images():
    """POST {url, method} -> JSON with the image URLs found on the page.

    ``method`` selects the scraper: 'selenium' renders the page, anything
    else (default 'soup') uses plain requests + BeautifulSoup.
    """
    payload = request.get_json()
    url = payload.get('url')
    method = payload.get('method', 'soup')

    if not url:
        logger.warning('未提供URL')
        return jsonify({'message': '请提供有效的网址'}), 400

    logger.info(f'开始分析网址: {url}，使用方法: {method}')
    start_time = time.time()

    try:
        fetcher = fetch_with_selenium if method == 'selenium' else fetch_with_soup
        image_urls, page_title = fetcher(url)

        # Drop data: URIs and resolve relative URLs against the page URL.
        absolute_urls = [
            img_url if img_url.startswith(('http://', 'https://'))
            else urllib.parse.urljoin(url, img_url)
            for img_url in image_urls
            if img_url and not img_url.startswith('data:')
        ]

        logger.info(f'找到 {len(absolute_urls)} 个图片 URL')

        images = [
            {'url': img_url, 'filename': clean_filename(os.path.basename(img_url))}
            for img_url in absolute_urls
        ]

        process_time = time.time() - start_time
        logger.info(f'分析完成，耗时: {process_time:.2f} 秒')

        return jsonify({
            'images': images,
            'domain': urllib.parse.urlparse(url).netloc,
            'title': page_title
        }), 200

    except Exception as e:
        logger.error(f'分析网页时发生错误: {str(e)}', exc_info=True)
        return jsonify({'message': f'发生错误: {e}'}), 500

def download_single_image(args):
    """Download one image to disk; worker function for the thread pool.

    Args:
        args: tuple ``(url, filepath, index, total)`` — source URL, final
            destination path, and 1-based progress counters for logging.

    Returns:
        A result dict: on success ``{'success': True, 'url', 'size',
        'filename', 'skipped'}`` (``skipped`` True when the file already
        existed); on failure ``{'success': False, 'url', 'error'}``.
    """
    url, filepath, index, total = args
    tmp_path = filepath + '.tmp'
    try:
        # Skip work when the target file is already on disk.
        if os.path.exists(filepath):
            file_size = os.path.getsize(filepath)
            logger.info(f'- 文件已存在 [{index}/{total}]: {os.path.basename(filepath)} ({format_size(file_size)})')
            return {
                'success': True,
                'url': url,
                'size': file_size,
                'filename': os.path.basename(filepath),
                'skipped': True
            }

        response = requests.get(url, timeout=30)
        response.raise_for_status()

        # Write to a temp file first so a partially-written file never
        # appears under the final name.
        with open(tmp_path, 'wb') as f:
            f.write(response.content)

        # BUG FIX: use os.replace instead of os.rename — rename raises on
        # Windows when the destination already exists (e.g. two workers
        # racing on duplicate URLs); replace overwrites atomically.
        os.replace(tmp_path, filepath)

        file_size = len(response.content)
        logger.info(f'- 成功下载 [{index}/{total}]: {os.path.basename(filepath)} ({format_size(file_size)})')

        return {
            'success': True,
            'url': url,
            'size': file_size,
            'filename': os.path.basename(filepath),
            'skipped': False
        }
    except requests.RequestException as e:
        logger.error(f'- 下载失败 [{index}/{total}]: {url}')
        logger.error(f'- 错误信息: {str(e)}')
        return {
            'success': False,
            'url': url,
            'error': str(e)
        }
    except Exception as e:
        logger.error(f'- 下载时发生未知错误 [{index}/{total}]: {url}')
        logger.error(f'- 错误信息: {str(e)}')
        return {
            'success': False,
            'url': url,
            'error': str(e)
        }
    finally:
        # Best-effort cleanup of any leftover temp file.
        if os.path.exists(tmp_path):
            try:
                os.remove(tmp_path)
            except OSError:
                pass

@app.route('/download-selected', methods=['POST'])
def download_selected():
    """POST {urls, domain, title} -> download the given images concurrently.

    Files are saved under ``downloads/<domain>/<page title>/`` (both path
    components sanitized). Returns a JSON summary listing newly downloaded,
    skipped (already present) and failed items.
    """
    data = request.get_json()
    urls = data.get('urls', [])
    domain = data.get('domain', '')
    page_title = data.get('title', 'untitled')

    if not urls or not domain:
        logger.warning('下载请求缺少必要参数')
        return jsonify({'message': '请提供有效的下载信息'}), 400

    logger.info(f'开始下载任务:')
    logger.info(f'- 域名: {domain}')
    logger.info(f'- 页面标题: {page_title}')
    logger.info(f'- 待下载图片数: {len(urls)}')

    start_time = time.time()

    try:
        domain_dir = os.path.join(DOWNLOADS_DIR, clean_filename(domain, True))
        page_dir = os.path.join(domain_dir, clean_filename(page_title, True))
        os.makedirs(page_dir, exist_ok=True)
        logger.info(f'创建下载目录: {page_dir}')

        # Build (url, filepath, index, total) work items for the pool.
        download_args = []
        for index, url in enumerate(urls, 1):
            filename = clean_filename(os.path.basename(url))
            filepath = os.path.join(page_dir, filename)
            download_args.append((url, filepath, index, len(urls)))

        # Leave one core's worth of threads free for the Flask worker itself.
        max_workers = max(1, multiprocessing.cpu_count() - 1)
        logger.info(f'使用 {max_workers} 个线程进行下载')

        results = []
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_url = {executor.submit(download_single_image, args): args[0]
                           for args in download_args}

            for future in concurrent.futures.as_completed(future_to_url):
                url = future_to_url[future]
                try:
                    results.append(future.result())
                except Exception as e:
                    logger.error(f'任务执行失败: {url}, 错误: {str(e)}')
                    results.append({
                        'success': False,
                        'url': url,
                        'error': str(e)
                    })

        successful = [r for r in results if r['success']]
        failed = [r for r in results if not r['success']]
        skipped = [r for r in successful if r.get('skipped', False)]
        new_downloads = [r for r in successful if not r.get('skipped', False)]
        total_size = sum(r['size'] for r in successful)

        process_time = time.time() - start_time
        # BUG FIX: when every file was skipped on a coarse-resolution clock
        # process_time could be exactly 0, making the average-speed line
        # raise ZeroDivisionError and fail the whole request.
        avg_speed = total_size / process_time if process_time > 0 else 0

        summary = f'''下载任务完成:
- 总耗时: {process_time:.2f} 秒
- 成功: {len(successful)} 个
- 新下载: {len(new_downloads)} 个
- 已存在: {len(skipped)} 个
- 失败: {len(failed)} 个
- 总大小: {format_size(total_size)}
- 平均速度: {format_size(avg_speed)}/s
- 保存位置: {page_dir}'''

        logger.info(summary)

        if failed:
            logger.warning('以下图片下载失败:')
            for fail in failed:
                logger.warning(f"- {fail['url']}: {fail['error']}")

        return jsonify({
            'message': f'完成下载任务 (新下载: {len(new_downloads)}，已存在: {len(skipped)}，失败: {len(failed)})',
            'downloaded': [r['filename'] for r in new_downloads],
            'skipped': [r['filename'] for r in skipped],
            'failed': [r['url'] for r in failed],
            'summary': summary
        }), 200
    except Exception as e:
        logger.error(f'下载过程中发生错误: {str(e)}', exc_info=True)
        return jsonify({'message': f'下载出错: {e}'}), 500

def cleanup():
    """Quit the shared Chrome driver on interpreter exit (registered below)."""
    global chrome_driver
    if chrome_driver:
        try:
            chrome_driver.quit()
        except Exception:
            # Best-effort shutdown: the process is exiting anyway.
            # (Was a bare except:, which would also swallow SystemExit.)
            pass
        chrome_driver = None

atexit.register(cleanup)

if __name__ == '__main__':
    # DOWNLOADS_DIR is already created at import time; this repeat is harmless.
    os.makedirs(DOWNLOADS_DIR, exist_ok=True)
    # Warm up the shared driver with a long page-load timeout before serving.
    init_chrome_driver(page_load_timeout=700)
    app.run(host='0.0.0.0', port=5000)