from datetime import datetime, timedelta
from bs4 import BeautifulSoup
from time import sleep
import re
import traceback
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from crawlers.constants import (
    KR_BASE_URL,
    KR_BOOKMARK_ARTICLES_BASE_URL,
    KR_HOT_ARTICLES_BASE_URL,
    KR_NEW_FLASH_URL,
    KR_WEB_NEWS_URL,
    KR_ZONGHE_ARTICLES_BASE_URL,
    CHROME_DRIVER_PATH
)

def _fetch_webpage_by_selenium(url):
    """Load *url* in headless Chrome, scroll to trigger lazy loading, and
    return the resulting page HTML.

    Scrolling stops as soon as at least 40 ``img.scaleBig`` elements are
    present, the page stops growing (bottom reached), or the maximum number
    of scroll passes is exhausted.

    Parameters:
        url: the page to fetch

    Returns:
        The page source HTML as a string.

    Raises:
        Exception: any Selenium error is logged and re-raised. (Previously
        the except branch fell through and returned None, which made callers
        crash later inside BeautifulSoup with an unrelated TypeError.)
    """
    option = webdriver.ChromeOptions()
    # Headless mode: do not open a visible browser window.
    option.add_argument("--headless")
    # Sandbox off — required in some container/CI environments.
    option.add_argument("--no-sandbox")
    # Avoid /dev/shm exhaustion inside containers.
    option.add_argument("--disable-dev-shm-usage")
    # Disable GPU acceleration.
    option.add_argument("--disable-gpu")

    # Present a regular desktop user agent.
    option.add_argument(
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36"
    )

    # Hide the webdriver automation flags to reduce bot detection.
    option.add_experimental_option("excludeSwitches", ["enable-automation"])
    option.add_experimental_option("useAutomationExtension", False)

    service = Service(CHROME_DRIVER_PATH)
    browser = webdriver.Chrome(service=service, options=option)

    try:
        browser.get(url)

        # Wait up to 20s until the first img.scaleBig element appears.
        WebDriverWait(browser, 20).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "img.scaleBig"))
        )

        # Keep scrolling until at least 40 img.scaleBig elements exist,
        # or the maximum number of scroll passes is reached.
        max_scroll_attempts = 10     # hard cap on full scroll passes
        target_elements_count = 40   # stop once this many images are loaded

        for _ in range(max_scroll_attempts):
            # Current page height before this pass.
            total_height = browser.execute_script("return document.body.scrollHeight")

            # Scroll down in thirds so lazy loading can keep up.
            for step in range(1, 4):
                # Target scroll position as a fraction of the page height.
                target_position = (total_height * step) // 3
                browser.execute_script(f"window.scrollTo(0, {target_position});")
                sleep(1.5)

                # Check how many target elements have loaded so far.
                current_count = len(
                    browser.find_elements(By.CSS_SELECTOR, "img.scaleBig")
                )
                if current_count >= target_elements_count:
                    return browser.page_source

            # If the page height did not change, we reached the bottom.
            new_height = browser.execute_script("return document.body.scrollHeight")
            if new_height == total_height:
                break

        return browser.page_source
    except Exception as e:
        print("获取页面失败:", e)
        print("详细错误信息:")
        traceback.print_exc()
        # Bug fix: re-raise instead of implicitly returning None so callers
        # see the real error rather than a TypeError inside BeautifulSoup.
        raise
    finally:
        browser.quit()

def _process_created_at(time_str:str):
    """处理热搜日期，将其转换为正确时间格式

    Parameters:
        time_str: 获取的原始时间数据

    Returns:
        处理后的时间数据
    """
    now = datetime.now()
    time_str = time_str.strip()

    # 处理相对时间
    if "秒前" in time_str:
        seconds = int(time_str.replace("秒前", ""))
        return (now - timedelta(seconds=seconds)).strftime("%Y-%m-%d %H:%M")
    elif "分钟前" in time_str:
        minutes = int(time_str.replace("分钟前", ""))
        return (now - timedelta(minutes=minutes)).strftime("%Y-%m-%d %H:%M")
    elif "小时前" in time_str:
        hours = int(time_str.replace("小时前", ""))
        return (now - timedelta(hours=hours)).strftime("%Y-%m-%d %H:%M")
    # 处理绝对时间
    elif "-" in time_str:
        return time_str

    # 如果无法解析，返回当前时间
    return now.strftime("%Y-%m-%d %H:%M")


def _processUrl(url:str):
    if not url:
        return ""
    if url.startswith("http"):
        return url
    return KR_BASE_URL + url

def _extract_article_info_from_36kr(element: BeautifulSoup, element_type, order):
    """Extract article fields from a 36kr list-item element.

    Which extra fields are extracted depends on the element type.

    Parameters:
        element: the BeautifulSoup element to pull data from
        element_type: one of "new" (latest news), "hot" (popularity list),
            "bookmark" (bookmark list) or "zonghe" (combined list); any value
            other than "new"/"hot" is handled like a bookmark entry
        order: 1-based position of the article in its list

    Returns:
        A dict with order, title, url, photo_url, intro, authors and,
        depending on element_type, created_at/themes/keywords, hot, or
        bookmarks. An empty dict when element is falsy.
    """
    if not element:
        return {}

    article_info = {"order": order}

    # Robustness fix: every select_one() below may return None on a malformed
    # item; guard each lookup so one bad entry cannot abort the whole crawl
    # with an AttributeError.
    title_element = element.select_one(".article-item-title.weight-bold")
    article_info["title"] = title_element.text if title_element else ""
    article_info["url"] = _processUrl(
        title_element.get("href", "") if title_element else ""
    )

    image_element = element.select_one("img.scaleBig")
    article_info["photo_url"] = image_element.get("src") if image_element else ""

    intro_element = element.select_one(".article-item-description")
    article_info["intro"] = intro_element.text if intro_element else ""

    author_element = element.select_one(".kr-flow-bar-author")
    article_info["authors"] = (
        [
            {
                "url": _processUrl(author_element.get("href")),
                "name": author_element.text,
            }
        ]
        if author_element
        else []
    )

    if element_type == "new":
        time_element = element.select_one(".kr-flow-bar-time")
        article_info["created_at"] = _process_created_at(
            time_element.text if time_element else ""
        )
        theme_element = element.select_one(".kr-flow-bar-motif a")
        article_info["themes"] = (
            [
                {
                    "url": _processUrl(theme_element.get("href")),
                    "name": theme_element.text,
                }
            ]
            if theme_element
            else []
        )
        keyword_element = element.select_one(".article-item-pic-wrapper a")
        article_info["keywords"] = (
            [
                {
                    "url": _processUrl(keyword_element.get("href")),
                    "name": keyword_element.text,
                }
            ]
            if keyword_element
            else []
        )
    elif element_type == "hot":
        hot_element = element.select_one(".kr-flow-bar-hot span")
        # The heat text may mix the number with other characters; take the
        # first (possibly fractional) numeric token.
        match = (
            re.search(r"\d+(\.\d+)?", hot_element.text) if hot_element else None
        )
        article_info["hot"] = float(match.group()) if match else 0
    else:
        collect_element = element.select_one(".kr-flow-bar-collect span")
        # The bookmark count may legitimately be absent.
        match = (
            re.search(r"\d+", collect_element.text) if collect_element else None
        )
        article_info["bookmarks"] = int(match.group()) if match else 0

    return article_info

def _get_ranked_articles_from_36kr(rank_type):
    """Fetch the ranked-article data of the given type from 36kr.

    Parameters:
        rank_type: "zonghe" (combined list), "bookmark" (bookmark list) or
            "hot" (popularity list)

    Returns:
        A list of article-info dicts, numbered consecutively across pages.

    Raises:
        ValueError: when rank_type is not a supported list type.
        Exception: re-raised after logging when scraping fails.
    """
    # Per rank type: (listing base URL, error message, element type tag).
    rank_configs = {
        "zonghe": (KR_ZONGHE_ARTICLES_BASE_URL, "36kr综合榜数据获取失败", "zonghe"),
        "bookmark": (KR_BOOKMARK_ARTICLES_BASE_URL, "36kr收藏榜数据获取失败", "bookmark"),
        "hot": (KR_HOT_ARTICLES_BASE_URL, "36kr人气榜数据获取失败", "hot"),
    }

    if rank_type not in rank_configs:
        raise ValueError(f"不支持的榜单类型: {rank_type}")
    base_url, error_msg, element_type = rank_configs[rank_type]

    try:
        today = datetime.now().strftime("%Y-%m-%d")
        first_page = _fetch_webpage_by_selenium(f"{base_url}{today}/1")
        soup = BeautifulSoup(first_page, "html.parser")

        # NOTE(review): the page count is taken as the NUMBER of
        # ".page-count" elements on the first page — confirm this matches
        # the site's pagination markup.
        total_pages = len(soup.select(".page-count"))

        articles = []
        for page in range(1, total_pages + 1):
            if page != 1:
                html = _fetch_webpage_by_selenium(f"{base_url}{today}/{page}")
                soup = BeautifulSoup(html, "html.parser")

            for item in soup.select(".kr-shadow-content"):
                # Article order is its 1-based position across all pages.
                articles.append(
                    _extract_article_info_from_36kr(
                        item, element_type, len(articles) + 1
                    )
                )

            sleep(1)  # be polite between page loads

        return articles

    except Exception as e:
        print(f"{error_msg}", e)
        print("详细错误信息:")
        traceback.print_exc()
        raise


def get_web_news_from_36kr():
    """Fetch the latest-news list from 36kr.

    Returns:
        A list of article-info dicts (type, title, link, image link, intro,
        themes, authors, time). Items without a preview image are skipped,
        but numbering still follows the element's position on the page.
    """
    try:
        html = _fetch_webpage_by_selenium(KR_WEB_NEWS_URL)
        soup = BeautifulSoup(html, "html.parser")

        news_items = []
        for position, item in enumerate(soup.select(".information-flow-item"), 1):
            # Only keep entries that carry a preview image.
            if item.select("img.scaleBig"):
                news_items.append(
                    _extract_article_info_from_36kr(item, "new", position)
                )

        return news_items
    except Exception as e:
        print("36kr快讯榜数据获取失败", e)
        print("详细错误信息:")
        traceback.print_exc()
        raise

def get_new_flash_from_36kr():
    """Fetch the news-flash list from 36kr.

    Returns:
        A list of dicts, one per flash, with order, title, url, created_at,
        intro and original_link.
    """
    try:
        html = _fetch_webpage_by_selenium(KR_NEW_FLASH_URL)
        soup = BeautifulSoup(html, "html.parser")

        flashes = []
        for position, item in enumerate(soup.select(".flow-item"), 1):
            title_el = item.select_one(".item-title")
            time_el = item.select_one(".time")
            intro_el = item.select_one(".item-desc span")
            link_el = item.select_one(".link")

            flashes.append(
                {
                    "order": position,
                    "title": title_el.text,
                    "url": _processUrl(title_el.get("href")),
                    "created_at": _process_created_at(time_el.text),
                    "intro": intro_el.text,
                    # The original-source link is optional on flash items.
                    "original_link": link_el.get("href") if link_el else "",
                }
            )

        return flashes
    except Exception as e:
        print("36kr快讯数据获取失败", e)
        print("详细错误信息:")
        traceback.print_exc()
        raise


def get_zonghe_articles_from_36kr():
    """Fetch the combined ("zonghe") ranking articles from 36kr.

    Returns:
        A list of article-info dicts (order, title, link, image link, intro,
        bookmark count, authors).
    """
    articles = _get_ranked_articles_from_36kr("zonghe")
    return articles


def get_bookmark_articles_from_36kr():
    """Fetch the bookmark-ranking articles from 36kr.

    Returns:
        A list of article-info dicts (order, title, link, image link, intro,
        bookmark count, authors).
    """
    articles = _get_ranked_articles_from_36kr("bookmark")
    return articles


def get_hot_articles_from_36kr():
    """Fetch the popularity-ranking articles from 36kr.

    Returns:
        A list of article-info dicts (order, title, link, image link, intro,
        heat value, authors).
    """
    articles = _get_ranked_articles_from_36kr("hot")
    return articles
