import traceback
from crawlers.utils import fetch_json_data
from crawlers.constants import WEIBO_BASE_URL, WEIBO_HOT_SEARCH_URL, REQUEST_HEADERS


def _process_text_hashtag(text):
    """处理微博热搜中的话题标签，将话题两端的#替换为%23，若没有#，则添加%23到两端

    Parameters:
        text: 要处理的文本

    Returns:
        处理后的文本
    """
    if text.startswith("#") and text.endswith("#"):
        return text.replace("#", "%23")
    else:
        return f"%23{text}%23"


def get_hot_search_from_weibo():
    """Fetch hot-search data from Weibo.

    Returns:
        A 2-tuple ``(pinned, regular)``:

        * ``pinned`` -- list containing the pinned ("hotgov") entry, with
          keys ``order``, ``title``, ``url`` and ``label``.
        * ``regular`` -- list of realtime hot-search entries, each with
          keys ``order``, ``title``, ``url``, ``hot`` and ``label``.

        On any failure the error is printed and ``([], [])`` is returned,
        so callers can always safely unpack the result.
    """
    try:
        json_data = fetch_json_data(WEIBO_HOT_SEARCH_URL, REQUEST_HEADERS)
        # Pinned ("hotgov") entry — always given order 0.
        gov = json_data["hotgov"]
        hot_gov_searches = [
            {
                "order": 0,
                "title": gov["word"].strip("#"),
                "url": f"{WEIBO_BASE_URL}/weibo?q={_process_text_hashtag(gov['word'])}",
                "label": gov["icon_desc"],
            }
        ]
        # Regular realtime entries. Items lacking "word_scheme" are skipped
        # (presumably ads/placeholders — confirm against the API); their
        # original 1-based position is still used as "order".
        hot_searches = []
        for index, item in enumerate(json_data["realtime"], 1):
            if not item.get("word_scheme"):
                continue
            hot_searches.append(
                {
                    "order": index,
                    "title": item["word"].strip("#"),
                    "url": f"{WEIBO_BASE_URL}/weibo?q={_process_text_hashtag(item['word_scheme'])}",
                    "hot": item["num"],
                    "label": item["label_name"],
                }
            )
        return hot_gov_searches, hot_searches
    except Exception as e:
        print("微博热搜数据获取失败:", e)
        print("详细错误信息:")
        traceback.print_exc()
        # Bug fix: the original implicitly returned None here, which made
        # `gov, hot = get_hot_search_from_weibo()` raise TypeError on any
        # failure. Return empty lists to keep the return shape consistent.
        return [], []