import traceback
import re
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
from crawlers.utils import fetch_webpage
from crawlers.constants import REQUEST_HEADERS, ITHOME_NEW_ARTICLES_URL, ITHOME_HOT_ARTICLES_URL

def _process_time_string(time_string):
    """处理时间字符串，将其转换为标准格式

    Parameters:
        time_string: 要处理的时间字符串

    Returns:
        处理后的时间字符串
    """
    if not time_string:
        return ""
    today_pattern = re.compile(r"^\d{2}:\d{2}$")
    tody_match = today_pattern.match(time_string)
    if tody_match:
        today = datetime.now()
        time_string = f"{today.year}-{today.month:02}-{today.day:02} {time_string}"
    elif "昨日" in time_string:
        yesterday = datetime.now() - timedelta(days=1)
        time_string = f"{yesterday.year}-{yesterday.month:02}-{yesterday.day:02} {time_string.replace('昨日', '').strip()}"
    elif "月" in time_string and "日" in time_string:
        year_now = datetime.now().year
        time_string = f"{year_now}-{time_string.replace('月', '-').replace('日', '')}"
    else:
        time_string = time_string.replace(".", "-")
    return time_string


def _extract_article_info(article_element, order):
    """Extract a single article's fields from its anchor element.

    Parameters:
        article_element: the article's <a> element (BeautifulSoup Tag)
        order: 1-based rank of the article within its list

    Returns:
        dict with keys: order, title, url, photo_url, comment_count, created_at
    """
    article_info = {
        "order": order,
        "title": article_element.select_one(".plc-con p.plc-title").text,
        "url": article_element.get("href"),
    }
    image_element = article_element.select_one(".plc-image img")
    # Images are lazy-loaded, so the real URL lives in data-original.
    article_info["photo_url"] = (
        image_element.get("data-original") if image_element else ""
    )
    comment_count_element = article_element.select_one(
        ".plc-footer .plc-footer-fr .review-num"
    )
    if comment_count_element:
        # Text looks like "123评"; strip the suffix before parsing.
        article_info["comment_count"] = int(
            comment_count_element.text.replace("评", "")
        )
    else:
        article_info["comment_count"] = 0
    created_at_element = article_element.select_one(".plc-footer .post-time")
    # Guard against a missing timestamp node instead of raising AttributeError.
    article_info["created_at"] = _process_time_string(
        created_at_element.text if created_at_element else ""
    )
    return article_info


def get_hot_articles_from_ithome():
    """Fetch IT之家 (ithome.com) hot-article rankings.

    Returns:
        Tuple of (daily, weekly, monthly, hot_comment) article lists; each
        entry is the dict produced by _extract_article_info.

    Raises:
        Re-raises any exception from fetching/parsing after logging it.
    """
    try:
        webpage = fetch_webpage(ITHOME_HOT_ARTICLES_URL, REQUEST_HEADERS)
        soup = BeautifulSoup(webpage, "html.parser")

        article_elements = soup.select(".rank-box .placeholder.one-img-plc a")
        # The page concatenates four equally-sized ranking lists back to back.
        article_num = len(article_elements) // 4
        if article_num == 0:
            # Empty or truncated page: avoid the modulo-by-zero below.
            return [], [], [], []
        # Rank restarts at 1 for each of the four lists.
        articles = [
            _extract_article_info(element, index % article_num + 1)
            for index, element in enumerate(article_elements)
        ]
        # Daily ranking
        daily_articles = articles[:article_num]
        # Weekly ranking
        weekly_articles = articles[article_num : article_num * 2]
        # Hot-comment ranking
        hot_comment_articles = articles[article_num * 2 : article_num * 3]
        # Monthly ranking
        monthly_articles = articles[article_num * 3 : article_num * 4]

        return daily_articles, weekly_articles, monthly_articles, hot_comment_articles
    except Exception as e:
        print("IT之家热文章数据获取失败", e)
        print("详细错误信息:")
        traceback.print_exc()
        raise

def get_new_articles_from_ithome():
    """Fetch the latest articles from IT之家 (ithome.com).

    Returns:
        List of article dicts (order, title, url, photo_url, comment_count,
        created_at), with advertisement entries skipped.

    Raises:
        Re-raises any exception from fetching/parsing after logging it.
    """
    try:
        page_html = fetch_webpage(ITHOME_NEW_ARTICLES_URL, REQUEST_HEADERS)
        document = BeautifulSoup(page_html, "html.parser")

        collected = []
        nodes = document.select(".content .placeholder.one-img-plc a")
        for position, node in enumerate(nodes, start=1):
            tip = node.select_one(".plc-footer .tip")
            # Skip sponsored entries; their footer tip contains "广告".
            # Note: ads still consume a position, matching the original order.
            if tip and "广告" in tip.text:
                continue
            collected.append(_extract_article_info(node, position))

        return collected
    except Exception as e:
        print("IT之家最新文章数据获取失败", e)
        print("详细错误信息:")
        traceback.print_exc()
        raise