"""读取数据库中的url，获取html代码并保存到根目录data/html_cache，以index命名，例如1.html, 2.html"""
import os
import sys
import requests
import time
import random
from pathlib import Path
import sys
from pathlib import Path
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, WebDriverException

# Make the project's src directory importable so the project-local
# modules below resolve when this script is run directly.
src_path = Path(__file__).parent.parent / 'src'
sys.path.insert(0, str(src_path))

# NOTE(review): 'db_helpler' looks like a typo of 'db_helper' — it must
# match the actual module filename on disk; verify before renaming.
from db_helpler import DatabaseHelper, Contact
from utils.logger import logger


def ensure_cache_dir():
    """Make sure the HTML cache directory exists and return its path.

    The cache lives at <project_root>/data/html_cache, where the
    project root is one directory above this script.

    Returns:
        Path: the cache directory (created if it was missing).
    """
    root_dir = Path(__file__).parent.parent
    target = root_dir.joinpath('data', 'html_cache')
    target.mkdir(parents=True, exist_ok=True)
    return target


def get_all_urls_from_db(db_path='data/uae_contacts.db'):
    """Fetch (contact_id, url) pairs for every contact in the database.

    Args:
        db_path: Path of the SQLite database file. Defaults to the
            original hard-coded location (relative to the current
            working directory), so existing callers are unaffected.

    Returns:
        list[tuple]: one ``(contact.id, contact.url)`` per row, or an
        empty list if the query fails.
    """
    db_helper = DatabaseHelper(db_path)
    session = db_helper.get_session()

    try:
        contacts = session.query(Contact).all()
        urls = [(contact.id, contact.url) for contact in contacts]
        logger.info(f"从数据库获取到 {len(urls)} 个URL")
        return urls
    except Exception as e:
        logger.error(f"❌ 获取URL失败: {e}")
        return []
    finally:
        # Always release the session and the helper's connection,
        # even when the query raised.
        session.close()
        db_helper.close()


def download_html_requests(url, file_path, timeout=30, max_retries=1):
    """Download *url* with requests and save the HTML to *file_path*.

    Retries up to *max_retries* times, sleeping a random 2-5 s between
    attempts to look less bot-like. Returns True on success, False when
    all attempts fail.

    Fix: if writing the file fails midway, the partial file is now
    removed — otherwise main() would see the file as existing and skip
    the URL forever on subsequent runs.
    """
    # Browser-like request headers to reduce the chance of being blocked.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
        'Accept-Encoding': 'gzip, deflate, br',
        'DNT': '1',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'none',
        'Sec-Fetch-User': '?1',
        'Cache-Control': 'max-age=0'
    }

    for attempt in range(max_retries):
        try:
            # Random back-off before each retry (not before the first try).
            if attempt > 0:
                delay = random.uniform(2, 5)
                logger.info(f"第{attempt + 1}次重试 {url}，等待 {delay:.1f} 秒...")
                time.sleep(delay)

            response = requests.get(url, headers=headers, timeout=timeout)
            response.raise_for_status()

            # Persist the HTML body.
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(response.text)

            logger.info(f"✅ 成功下载(requests): {url} -> {file_path}")
            return True

        except requests.exceptions.RequestException as e:
            # Network/HTTP failure: keep retrying until attempts run out.
            if attempt < max_retries - 1:
                logger.warning(f"⚠️ requests下载失败 {url} (尝试 {attempt + 1}/{max_retries}): {e}")
            else:
                logger.error(f"❌ requests下载失败 {url} (所有重试已用完): {e}")
        except Exception as e:
            # Local failure (e.g. disk error while writing): do not retry,
            # and drop any partially written file so the URL is retried
            # on the next run instead of being skipped as "cached".
            logger.error(f"❌ 保存文件失败 {file_path}: {e}")
            try:
                os.remove(file_path)
            except OSError:
                pass
            return False

    return False


def create_selenium_driver():
    """Create a Chrome webdriver configured to evade basic bot detection.

    A fresh temporary user-data directory is created per driver and
    attached as ``driver._temp_dir`` so the caller can remove it after
    ``driver.quit()``.

    Returns:
        The webdriver instance, or None if creation failed.

    Fix: previously the temporary profile directory leaked whenever
    driver creation failed; it is now removed in the error path.
    """
    import shutil
    import tempfile

    temp_dir = None
    try:
        chrome_options = Options()
        # Isolated user-data dir avoids "profile already in use" conflicts.
        temp_dir = tempfile.mkdtemp(prefix="chrome_selenium_")
        chrome_options.add_argument(f"--user-data-dir={temp_dir}")

        # Stability / anti-automation-detection options.
        chrome_options.add_argument("--no-sandbox")
        chrome_options.add_argument("--disable-dev-shm-usage")
        chrome_options.add_argument("--disable-blink-features=AutomationControlled")
        chrome_options.add_argument("--disable-web-security")
        chrome_options.add_argument("--disable-features=VizDisplayCompositor")
        chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
        chrome_options.add_experimental_option('useAutomationExtension', False)

        # Match the user agent used by the requests-based downloader.
        chrome_options.add_argument("--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36")

        driver = webdriver.Chrome(options=chrome_options)
        # Hide the webdriver flag that sites probe for.
        driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")

        # Remember the temp dir on the driver so the caller can clean it up.
        driver._temp_dir = temp_dir

        return driver
    except Exception as e:
        logger.error(f"❌ 创建selenium driver失败: {e}")
        # Clean up the temp profile dir that would otherwise leak.
        if temp_dir:
            shutil.rmtree(temp_dir, ignore_errors=True)
        return None


def download_html_selenium(url, file_path, timeout=30):
    """Render *url* with selenium Chrome and save the page source.

    Fallback path for pages that block plain HTTP requests. Returns
    True on success, False on any failure. The driver and its temporary
    profile directory are always cleaned up.

    Fix: the bare ``except:`` clauses (which would swallow SystemExit /
    KeyboardInterrupt) are narrowed to ``except Exception``, and the
    redundant try/except around ``shutil.rmtree(..., ignore_errors=True)``
    is removed.
    """
    driver = None
    try:
        driver = create_selenium_driver()
        if not driver:
            return False

        # Abort page loads that exceed the caller's timeout.
        driver.set_page_load_timeout(timeout)

        logger.info(f"🌐 使用selenium访问: {url}")
        driver.get(url)

        # Wait (up to 10 s) for at least the <body> element to appear.
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.TAG_NAME, "body"))
        )

        html_content = driver.page_source

        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(html_content)

        logger.info(f"✅ 成功下载(selenium): {url} -> {file_path}")
        return True

    except TimeoutException:
        logger.error(f"❌ selenium访问超时: {url}")
        return False
    except WebDriverException as e:
        logger.error(f"❌ selenium WebDriver错误 {url}: {e}")
        return False
    except Exception as e:
        logger.error(f"❌ selenium下载失败 {url}: {e}")
        return False
    finally:
        if driver:
            # Read the temp dir before quit() in case the attribute
            # becomes unreachable afterwards.
            temp_dir = getattr(driver, '_temp_dir', None)

            try:
                driver.quit()
            except Exception:
                # Best effort: the browser may already be gone.
                pass

            # Remove the per-driver profile directory.
            if temp_dir and os.path.exists(temp_dir):
                import shutil
                shutil.rmtree(temp_dir, ignore_errors=True)


def download_html(url, file_path, timeout=30, max_retries=1):
    """Fetch *url* and save its HTML to *file_path*.

    Tries the lightweight requests path first; only when that fails
    does it fall back to the slower selenium-rendered download.
    Returns True on success.
    """
    fetched = download_html_requests(url, file_path, timeout, max_retries)
    if fetched:
        return True

    logger.info(f"🔄 requests失败，尝试使用selenium: {url}")
    return download_html_selenium(url, file_path, timeout)


def main():
    """Entry point: download and cache the HTML for every DB URL.

    Skips URLs whose cache file already exists, pauses randomly between
    downloads, and logs a summary (plus the list of failures) at the end.
    """
    logger.info("开始HTML爬取任务")

    cache_dir = ensure_cache_dir()

    urls = get_all_urls_from_db()
    if not urls:
        logger.warning("没有找到任何URL，退出程序")
        return

    success_count = 0
    failed_count = 0
    failed_urls = []  # (contact_id, url) pairs that could not be fetched

    for contact_id, url in urls:
        # Cache files are named after the contact's primary key.
        file_path = cache_dir / f"{contact_id}.html"

        # Already cached from a previous run — skip it.
        if file_path.exists():
            logger.info(f"文件已存在，跳过: {file_path}")
            continue

        if download_html(url, file_path):
            success_count += 1
        else:
            failed_count += 1
            failed_urls.append((contact_id, url))

        # Random pause between downloads to avoid hammering the server.
        time.sleep(random.uniform(2, 5))

    # Final summary.
    logger.info(f"HTML爬取任务完成: 成功 {success_count} 个，失败 {failed_count} 个")

    if failed_urls:
        logger.warning(f"以下 {len(failed_urls)} 个URL下载失败:")
        for contact_id, url in failed_urls:
            logger.warning(f"  - ID {contact_id}: {url}")
    else:
        logger.info("所有URL都下载成功！")


# Script entry point: run the full crawl when executed directly.
if __name__ == '__main__':
    main()

