import requests
from bs4 import BeautifulSoup
import re
from urllib.parse import urlparse
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from time import sleep
import os
from dataclasses import dataclass
from typing import Optional, List, Callable
from datetime import datetime
import concurrent.futures
import logging
import functools
import json  # 添加json模块导入

# Configure module-wide logging: INFO level, timestamped single-line format.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


@dataclass
class WebPageInfo:
    """Entity describing one scraped web page."""
    mark: str  # page title / display name
    url: str  # page URL
    tab: str = "post"  # tab label, defaults to "post"
    earliest: str = "2020/1/1"  # earliest date bound
    latest: str = "2025/12/12"  # latest date bound
    enable: bool = True  # whether this entry is enabled

    def to_dict(self):
        """Return the page info as a plain dict, preserving field order."""
        field_names = ("mark", "url", "tab", "earliest", "latest", "enable")
        return {name: getattr(self, name) for name in field_names}


def retry_on_failure(max_retries: int = 3, wait_time: int = 5):
    """
    Decorator that retries a function when it raises.

    The wrapped function is attempted up to ``max_retries + 1`` times in
    total (one initial call plus ``max_retries`` retries), sleeping
    ``wait_time`` seconds between attempts.  When every attempt fails,
    the last exception is re-raised to the caller.

    Args:
        max_retries (int): maximum number of retries after the first failure
        wait_time (int): seconds to wait between attempts

    Returns:
        Callable: the decorator to apply to the target function
    """

    def decorator(func: Callable):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # attempt runs 1 .. max_retries + 1; a bounded for-loop replaces
            # the original manual counter (whose trailing `return None` was
            # unreachable — the final failed attempt always re-raises).
            for attempt in range(1, max_retries + 2):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    if attempt <= max_retries:
                        logger.warning(f"第{attempt}次尝试失败: {str(e)}，等待{wait_time}秒后重试...")
                        sleep(wait_time)
                    else:
                        logger.error(f"达到最大重试次数({max_retries})，执行失败: {str(e)}")
                        raise

        return wrapper

    return decorator


@retry_on_failure(max_retries=3, wait_time=5)
@retry_on_failure(max_retries=3, wait_time=5)
def get_webpage_title(url, chrome_driver_path=r'D:\chromedriver-win64\chromedriver.exe', wait_time=3):
    """
    Fetch a web page's title with Selenium (handles JavaScript-rendered pages).

    Prefers an on-page heading (``<h1>`` or a common title CSS class) over the
    document ``<title>`` when one is present and non-empty.

    Args:
        url (str): page URL; an ``https://`` scheme is prepended when missing
        chrome_driver_path (str): path to the ChromeDriver executable
            (raw string so the Windows backslashes are not escape sequences)
        wait_time (int): seconds to sleep after page load for JS rendering

    Returns:
        WebPageInfo: entity holding the resolved title (``mark``) and URL

    Raises:
        Exception: re-raised on any failure so the retry decorator can act
    """
    driver = None
    try:
        # Normalize the URL scheme.
        if not url.startswith(("http://", "https://")):
            url = "https://" + url

        # Configure a headless, automation-hardened Chrome instance.
        chrome_options = Options()
        chrome_options.add_argument("--headless")  # no visible window
        chrome_options.add_argument("--disable-gpu")
        chrome_options.add_argument("--window-size=1920,1080")
        chrome_options.add_argument("--start-maximized")
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--ignore-certificate-errors')
        chrome_options.add_argument('--disable-extensions')  # no extensions
        chrome_options.add_argument('--disable-popup-blocking')  # allow popups
        chrome_options.add_argument('--disable-blink-features=AutomationControlled')  # reduce bot detection

        # Spoof a regular desktop browser user agent.
        chrome_options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")

        # Start the browser.
        service = Service(chrome_driver_path)
        driver = webdriver.Chrome(service=service, options=chrome_options)

        # Bound page-load and script execution time.
        driver.set_page_load_timeout(30)
        driver.set_script_timeout(30)

        # Load the page.
        driver.get(url)

        # Give client-side JS time to render.
        sleep(wait_time)

        # Fall back to the document <title>.
        title = driver.title

        # Prefer a more specific on-page heading when one exists.
        try:
            h1_elements = driver.find_elements(By.TAG_NAME, "h1")
            if h1_elements and h1_elements[0].text.strip():
                title = h1_elements[0].text.strip()
            else:
                # Common title-bearing CSS classes (site-dependent; may need tuning).
                title_elements = driver.find_elements(By.CSS_SELECTOR, ".title, .post-title, .article-title, .entry-title, .headline")
                if title_elements and title_elements[0].text.strip():
                    title = title_elements[0].text.strip()
        except Exception as e:
            # Best-effort only: keep the document <title> on any lookup failure.
            logger.warning(f"获取具体标题元素失败: {str(e)}")

        # Collapse runs of whitespace in the title.
        if title:
            title = re.sub(r"\s+", " ", title).strip()

        return WebPageInfo(
            mark=title if title else "未知标题",
            url=url
        )

    except Exception as e:
        logger.error(f"获取网页标题时出错: {str(e)}")
        raise  # propagate so the retry decorator fires

    finally:
        # Always release the browser instance.
        if driver:
            try:
                driver.quit()
            except Exception as e:
                logger.error(f"关闭浏览器实例时出错: {str(e)}")


def get_webpage_titles(urls: List[str], chrome_driver_path=r'D:\chromedriver-win64\chromedriver.exe',
                       wait_time=3, max_workers=5) -> List[WebPageInfo]:
    """
    Fetch the titles of multiple pages concurrently with a thread pool.

    Each URL is handed to :func:`get_webpage_title`; a URL whose fetch fails
    (after that function's retries) still yields a ``WebPageInfo`` entry with
    ``mark="获取失败"``, and all failed URLs are written to ``failed_urls.txt``.

    Args:
        urls (List[str]): page URLs to process
        chrome_driver_path (str): path to the ChromeDriver executable
            (raw string so the Windows backslashes are not escape sequences)
        wait_time (int): per-page wait for JS rendering, in seconds
        max_workers (int): thread-pool size, default 5

    Returns:
        List[WebPageInfo]: one entry per input URL, in completion order
    """
    results = []
    failed_urls = []  # URLs whose fetch ultimately failed

    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Map each future back to its URL for logging.
        future_to_url = {
            executor.submit(get_webpage_title, url, chrome_driver_path, wait_time): url
            for url in urls
        }

        # Collect results as futures complete.
        for future in concurrent.futures.as_completed(future_to_url):
            url = future_to_url[future]
            try:
                webpage_info = future.result()
                results.append(webpage_info)
                logger.info(f"成功获取网页标题: {url} -> {webpage_info.mark}")
            except Exception as e:
                logger.error(f"获取网页标题失败 {url}: {str(e)}")
                failed_urls.append(url)
                # Keep a placeholder entry so callers see every input URL.
                results.append(WebPageInfo(
                    mark="获取失败",
                    url=url
                ))

    # Persist failed URLs for later inspection / re-runs.
    if failed_urls:
        logger.warning(f"以下URL获取失败: {failed_urls}")
        try:
            with open('failed_urls.txt', 'w', encoding='utf-8') as f:
                for url in failed_urls:
                    f.write(f"{url}\n")
            logger.info("失败的URL已保存到 failed_urls.txt")
        except Exception as e:
            logger.error(f"保存失败URL到文件时出错: {str(e)}")

    return results


def save_webpage_infos_to_json(webpage_infos: List[WebPageInfo], output_dir: str = "output") -> str:
    """
    Serialize a list of WebPageInfo objects to a timestamped JSON file.

    Args:
        webpage_infos (List[WebPageInfo]): objects to serialize
        output_dir (str): directory for the output file, created if missing
            (defaults to "output")

    Returns:
        str: path of the JSON file that was written

    Raises:
        Exception: re-raised after logging when directory creation or the
            write fails
    """
    try:
        os.makedirs(output_dir, exist_ok=True)

        # Timestamped file name so repeated runs never overwrite each other.
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filepath = os.path.join(output_dir, f"webpage_infos_{stamp}.json")

        serializable = [info.to_dict() for info in webpage_infos]
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(serializable, f, ensure_ascii=False, indent=2)

        logger.info(f"成功保存网页信息到文件: {filepath}")
        return filepath

    except Exception as e:
        logger.error(f"保存JSON文件时出错: {str(e)}")
        raise


if __name__ == "__main__":
    # ChromeDriver location (raw string so the backslashes are not
    # misread as escape sequences).
    chrome_driver_path = r'D:\chromedriver-win64\chromedriver.exe'

    # Douyin profile URLs to fetch titles for.
    test_urls = [
        "https://www.douyin.com/user/MS4wLjABAAAANSvwTSzKbPOiEzVayeMTpjguBBSEE8L50Ci6KcVmjXmIsteE-nKbuwNej1pdQjM7?from_tab_name=main",
        "https://www.douyin.com/user/MS4wLjABAAAAr5t4q_1fkciUN8WgC0dJ9qeFPx-Ri3VRaOB6Ca0YYos?from_tab_name=main",
        "https://www.douyin.com/user/MS4wLjABAAAAXQvJDxtK7fUYeDtYXx5WPHZTK5DXmg1lxAYslNgFx6VGJsyGYOHs4yA6L6Wf800-?from_tab_name=main",
        "https://www.douyin.com/user/MS4wLjABAAAAPtF2Am8ThIRy5fKaxy0ofMGK79Z0s25cBaJGpquDwiY?from_tab_name=main",
        "https://www.douyin.com/user/MS4wLjABAAAAl2wmMic3h_Jgyj2ll2yntVQdso2qgKiwKleqQ8QAM_4?from_tab_name=main&relation=1&vid=7515786533489986827",
        "https://www.douyin.com/user/MS4wLjABAAAAWkqK23yacLTN3UQX2H8d1yuFAyu2N1dFawS0rEvbs1k?from_tab_name=main&vid=7510522341027482880",
        "https://www.douyin.com/user/MS4wLjABAAAA3DDjIQovZ7IC7Z3KtDfQv19ycTR3h7b5ndQRyMOESoBiQrlF2jzpTT0T7haaFEEy?from_tab_name=main&vid=7484263414185889024",
        "https://www.douyin.com/user/MS4wLjABAAAAtjUI-aWPGL5w8GWZtY1IUj1e1oYWXJskaiHHNjJ5TTITXP870WWqOxBmtkqjJFyo?from_tab_name=main&vid=7483923932735753499",
        "https://www.douyin.com/user/MS4wLjABAAAAvO4nM26Zy-4p04GBiHfel5Wp01-xoM6izk12iQKFegyflD77D_baNHjg_9gxXNIW?from_tab_name=main&vid=7503364746012855567",
        "https://www.douyin.com/user/MS4wLjABAAAA7KxffLLearXyxzBKYZGZ_MaXzN0ZEmvyHkOtw0o6kUU?from_tab_name=main&vid=7509711293345025337",
        "https://www.douyin.com/user/MS4wLjABAAAAXSJ4uLU6shM-vCtJ60AbPWydEPCRpTi3oUx7HMG4r_trc02h1MIcH_S-sKnVSr2P?from_tab_name=main&vid=7505304947421383971",
        "https://www.douyin.com/user/MS4wLjABAAAAaYVtqwZA2_D77Yp9gIr0lKxbZvUv7E67O5g3Epw_u8g?from_tab_name=main&vid=7504968801948962107",
        "https://www.douyin.com/user/MS4wLjABAAAAEcJ2BxLo44-YsZzsUXaxB4YZY2rCKVvUjZaixaMRU_s?from_tab_name=main&vid=7494283777619791141",
        "https://www.douyin.com/user/MS4wLjABAAAALkzZWOAFg74yDrt4Sd5vdRkM75zFs9H1VyldDgA5NOYkhI2wOg71SWodQ2oC6y1s?from_tab_name=main&vid=7504149667660811559",
        "https://www.douyin.com/user/MS4wLjABAAAAA9LBXOu5j0fHgs6qfXHNs0Hlc5eiJFUQaQmXUD7hZR_pDQ70TkAw62L7BLMKkscr?from_tab_name=main&vid=7510085389446040890",
    ]

    print("\n=== 批量获取网页标题 ===")
    # Fetch all titles concurrently with up to 20 worker threads
    # (the original comment claimed 3 threads; max_workers=20 is what runs).
    webpage_infos = get_webpage_titles(test_urls, chrome_driver_path, max_workers=20)

    # Show every collected entry.
    for info in webpage_infos:
        print(f"\n网页信息: {info.to_dict()}")

    # Tally successes vs. failures ("获取失败" marks a failed fetch).
    total_count = len(webpage_infos)
    success_count = sum(1 for info in webpage_infos if info.mark != "获取失败")
    failure_count = total_count - success_count
    # Guard against division by zero should the URL list ever be empty.
    success_rate = (success_count / total_count) * 100 if total_count else 0.0

    print(f"\n=== 统计信息 ===")
    print(f"总URL数量: {total_count}")
    print(f"成功获取数量: {success_count}")
    print(f"失败获取数量: {failure_count}")
    print(f"成功率: {success_rate:.2f}%")

    # Persist the results as JSON.
    try:
        saved_file = save_webpage_infos_to_json(webpage_infos)
        print(f"\n网页信息已保存到文件: {saved_file}")
    except Exception as e:
        print(f"\n保存文件时出错: {str(e)}")
