import json
import os
import re
from typing import List

from loguru import logger

from mdtool.entity.wx_entity import WxArticleBrief


def parse_hisotry_from_string(history_str: str) -> List[WxArticleBrief]:
    """Parse a WeChat Official Account history-export JSON string.

    The export nests JSON up to three levels deep: the top-level object
    contains ``publish_page`` (sometimes a JSON-encoded string), whose
    ``publish_list`` items each carry a ``publish_info`` JSON string.

    Args:
        history_str: Raw JSON response body from the appmsgpublish endpoint.

    Returns:
        A de-duplicated list of WxArticleBrief entries, in export order.

    Raises:
        json.JSONDecodeError: If ``history_str`` itself is not valid JSON.
        KeyError: If the top-level ``publish_page`` field is missing.
    """
    main_dict = json.loads(history_str)
    # "publish_page" may arrive as a JSON-encoded string; decode a second
    # time in that case.
    publish_page = main_dict["publish_page"]
    if isinstance(publish_page, str):
        publish_page_dict = json.loads(publish_page)
    else:
        publish_page_dict = publish_page
    logger.debug("\n===== 统计信息 =====")
    # Use .get() so a missing counter logs "None" instead of raising.
    logger.debug(f"总文章数: {publish_page_dict.get('total_count')}")
    logger.debug(f"已发布数: {publish_page_dict.get('publish_count')}")
    logger.debug(f"精选文章数: {publish_page_dict.get('featured_count')}")
    logger.debug("===== 文章列表信息 =====")
    result = []
    for idx, publish_item in enumerate(publish_page_dict.get("publish_list", []), 1):
        # "publish_info" is the third nesting level of JSON-in-a-string.
        publish_info_str = publish_item.get("publish_info", "{}")
        if isinstance(publish_info_str, str):
            try:
                publish_info_dict = json.loads(publish_info_str)
            except json.JSONDecodeError:
                # Skip entries whose payload is not valid JSON; one bad
                # item should not abort the whole parse.
                continue
        else:
            publish_info_dict = publish_info_str
        # Only the first appmsg_info entry of each publish item is used.
        appmsg_info_list = publish_info_dict.get("appmsg_info", [])
        if not appmsg_info_list or not isinstance(appmsg_info_list, list):
            continue
        appmsg_info = appmsg_info_list[0]
        # "or {}" guards against the key being present with a null value.
        album_info = appmsg_info.get("appmsg_album_info", {}) or {}
        album_id = str(album_info.get("id") or album_info.get("album_id") or "")
        album_title = album_info.get("title", "")
        content_url = appmsg_info.get("content_url", "")
        # The media id is the path segment after "/s/" in the article URL.
        match = re.search(r"/s/([^/?#]+)", content_url)
        media_id = match.group(1) if match else ""
        title = appmsg_info.get("title", "")
        # .get() with a 0 default: counters may be absent on some items,
        # and they are only used for debug logging anyway.
        read_num = appmsg_info.get("read_num", 0)
        share_num = appmsg_info.get("share_num", 0)

        logger.debug(f"\n文章 {idx}:")
        logger.debug(f"标题: {title}")
        logger.debug(f"阅读数: {read_num}")
        logger.debug(f"分享数: {share_num}")
        # Guard the nested lookup: sent_info/time may be missing.
        logger.debug(f"发布时间戳: {publish_info_dict.get('sent_info', {}).get('time')}")
        logger.debug(f"内容链接: {content_url}")
        logger.debug(f"album_id: {album_id}")
        logger.debug(f"album_title: {album_title}")
        result.append(
            WxArticleBrief(
                media_id=media_id,
                title=title,
                tags=[],
                album_id=album_id,
                album_title=album_title,
            )
        )
    return remove_duplication(result)


def __parse_history_from_single_file(json_file_path: str) -> List[WxArticleBrief]:
    """Read one exported JSON file and parse it into article briefs.

    Args:
        json_file_path: Path to a UTF-8 encoded JSON export file.

    Returns:
        The WxArticleBrief entries parsed from the file.
    """
    with open(json_file_path, "r", encoding="utf-8") as f:
        history_str = f.read()
    # Log a short repr preview of the raw content so encoding problems are
    # easy to spot; use the module logger (not print) like the rest of the
    # file.
    logger.debug("==== 文件内容repr预览 ====")
    logger.debug(repr(history_str[:300]))
    return parse_hisotry_from_string(history_str)


def parse_history_from_dir(json_dir: str) -> List[WxArticleBrief]:
    """Parse every ``*.json`` file under *json_dir* into article briefs.

    How to obtain the JSON files from the WeChat Official Account platform:

    1. Log in to the platform in a browser (you must stay logged in) and
       copy the ``token`` from the address bar into the URL below.
    2. Open
       <https://mp.weixin.qq.com/cgi-bin/appmsgpublish?sub=list&begin=0&count=30&token={}&lang=zh_CN&f=json&ajax=1>.
    3. Copy the response body.
    4. Save it as a .json file inside *json_dir*.
    5. Increase ``begin`` in the URL by ``count`` (WeChat allows at most
       20 per request) and repeat steps 2-4 until all articles are fetched.

    Args:
        json_dir: Directory searched recursively for ``*.json`` files.

    Returns:
        A de-duplicated list of WxArticleBrief entries from all files.
    """
    # Collect all .json files recursively.
    file_list = []
    for root, _, files in os.walk(json_dir):
        for file in files:
            if file.endswith(".json"):
                file_list.append(os.path.join(root, file))
    # os.walk yields files in arbitrary filesystem order; sort so the
    # result order is deterministic across runs and machines.
    file_list.sort()
    ab_list = []
    for file_path in file_list:
        ab_list.extend(__parse_history_from_single_file(file_path))
    # De-duplicate across files, in case exported pages overlap.
    return remove_duplication(ab_list)


def remove_duplication(origin: List[WxArticleBrief]) -> List[WxArticleBrief]:
    """Return a new list keeping only the first article per media_id.

    Articles with an empty ``media_id`` are dropped; original order is
    preserved for the entries that remain.

    Args:
        origin: Article briefs, possibly containing duplicates.

    Returns:
        The de-duplicated article briefs.
    """
    unique_articles = []
    seen = set()
    for article in origin:
        mid = article.media_id
        # Skip empty ids and anything we have already emitted.
        if not mid or mid in seen:
            continue
        seen.add(mid)
        unique_articles.append(article)
    return unique_articles
