import time
import json
from typing import List, Optional, Dict, Any

import execjs
import requests

# Your Taobao cookies; kept at module level so they can be swapped out dynamically.
# NOTE(review): these are captured session credentials (login token, `_m_h5_tk`
# sign token, etc.) — they expire and should ideally be loaded from an external
# file/env rather than hard-coded. The `_m_h5_tk` entry is required by
# get_comments_by_id() for signature generation.
cookies = {
    "cna": "aOGMIKFiNEACAbcR5E3KwR03",
    "lid": "%E5%B9%B2%E6%8B%94%E9%BE%99",
    "isg": "BC0t-DgnOdC4O908PKd0RGZuPMmnimFcLmkdbm8yaUQz5k2YN9pxLHu10LoA5nkU",
    "dnk": "%5Cu8D85%5Cu7A7A%5Cu95F4%5Cu5C0F%5Cu9F99",
    "tracknick": "%5Cu5E72%5Cu62D4%5Cu9F99",
    "_l_g_": "Ug%3D%3D",
    "unb": "4141399939",
    "lgc": "%5Cu5E72%5Cu62D4%5Cu9F99",
    "cookie1": "B0T%2BEM1LhmWcZGi1HlADpXYvm4s8KTR0cZgBcij2UPE%3D",
    "login": "true",
    "wk_cookie2": "11cc21be3691e3bef7e31f00b2097304",
    "cookie17": "Vy0XH%2FvdtWEupw%3D%3D",
    "_nk_": "%5Cu5E72%5Cu62D4%5Cu9F99",
    "cancelledSubSites": "empty",
    "sg": "%E9%BE%999c",
    "t": "bbf9a9d29ae40645091f42218000259d",
    "sn": "",
    "_tb_token_": "ede147eeee5d7",
    "wk_unb": "Vy0XH%2FvdtWEupw%3D%3D",
    "mtop_partitioned_detect": "1",
    "_m_h5_tk": "1874536d7ac08d0f56fd6b724c551a4d_1753879960244",
    "_m_h5_tk_enc": "cbcb171e9d405317e201ce65dd411980",
    "havana_sdkSilent": "1753900123254",
    "uc1": "cookie15=W5iHLLyFOGW7aA%3D%3D&cookie21=U%2BGCWk%2F7pY%2FF&existShop=false&pas=0&cookie14=UoYbyGQqhHZl2Q%3D%3D&cookie16=VFC%2FuZ9az08KUQ56dCrZDlbNdA%3D%3D",
    "uc3": "vt3=F8dD2fntbUnrpJ4CGsE%3D&id2=Vy0XH%2FvdtWEupw%3D%3D&lg2=UtASsssmOIJ0bQ%3D%3D&nk2=2%2Bo4E1g7",
    "uc4": "id4=0%40VXqZjUbeE1ouuElN8zTNW3sZYpPL&nk4=0%402YSBwsKq94cCPRvQ75h8b80%3D",
    "havana_lgc_exp": "1784975323255",
    "cookie2": "2a1494b387b4a2d5bf7046f6e68c8544",
    "csg": "7a69d063",
    "sgcookie": "E100k/SYZq2F3raIATiqocqAZTbSnfqbd0as5lZ5N2mkCx2kIHqivaJQpY7VBgxNuWhTCx9w5xrRDox13M05o/gOT5hIs6nvNwzI8qDlBpfUc2k=",
    "tfstk": "gXosh-mnq1f6xVZAhPvFVZq6kZrflp-y5twxExINHlEThiMoGArqHSobcXcLkcrZQXtjEbaakmPqcOmINiSwsFDAcorvaQ-y4ADamodrXPywl12zB-K4k-QLj-k_QQsp4ADGpO1vU2KPhnXAarFxMPUdpRPdDRUtMyILnWITHGeA9pN39RBTWrIdJRyPBihYDpMLt-2TDPhYvpwmM867hFNiCLTZQ8fOXVDTOiIxXn4TNFF3KJoCPPFSBc1O6E2_57HTOCdBLqaKnricniaZB4c0efC91oHQFbwS1EW012HrbqH6AQEZjA38k0dckbZsGznTRtIjIlPbAJnJ3aVEA5lxfypPVrrK4zEt8eAaulwSMcc1hih-LYog-mO59S0aEkesM3dC4rSzNwidl9alcJNydp_coAO1T07oLpc8WJ2_8p9C6q4TKJNydp_coPe3CWJBd13c.",
}


def get_sign(token: str, t: str, data_json: str) -> str:
    """
    Generate the mtop API request signature.

    Delegates to the ``_getSign`` function defined in ``get_sign.js``, which
    must exist in the current working directory.

    Args:
        token: The part of the ``_m_h5_tk`` cookie before the underscore.
        t: Millisecond timestamp as a string.
        data_json: JSON-encoded request payload to be signed.

    Returns:
        The signature string, or "" on any failure — callers treat "" as
        the error sentinel.
    """
    try:
        # Compile the JS only once and cache the context on the function
        # object, so repeated calls (one per page fetched) don't re-read
        # and re-compile the script every time.
        ctx = getattr(get_sign, "_ctx", None)
        if ctx is None:
            with open("get_sign.js", "r", encoding="utf-8") as f:
                ctx = execjs.compile(f.read())
            get_sign._ctx = ctx
        return ctx.call("_getSign", token, str(t), data_json)
    except Exception as e:
        # Deliberate best-effort: report and return the "" sentinel rather
        # than crash the scraping loop.
        print(f"[错误] 生成签名失败: {e}")
        return ""


def get_comments_by_id(
    item_id: str,
    cookies: Dict[str, str],
    page_no: int = 1,
    page_size: int = 20,
    rate_type: str = "",
    order_type: str = "",
    proxies: Optional[Dict[str, str]] = None,
) -> Optional[Dict[str, Any]]:
    """
    Fetch one page of reviews for a product via the mtop rate-list API.

    Args:
        item_id: Product ID.
        cookies: Taobao cookies; must contain a valid ``_m_h5_tk`` entry,
            whose prefix (before "_") is the signing token.
        page_no: Page number, starting at 1.
        page_size: Items per page; values above 20 are not recommended.
        rate_type: Review filter — "" = all, "1" = positive, "2" = neutral,
            "3" = negative.
        order_type: Sort order — "" = default, "1" = newest first,
            "2" = oldest first.
        proxies: Optional requests-style proxy mapping. Defaults to the
            local socks5 proxy previously hard-coded here; pass {} to
            connect directly.

    Returns:
        The ``data`` dict from the API response, or None on any failure.
    """
    # Validate the signing token before doing any work.
    m_h5_tk = cookies.get("_m_h5_tk", "")
    if not m_h5_tk or "_" not in m_h5_tk:
        print("[错误] cookies中缺少有效的_m_h5_tk")
        return None

    token = m_h5_tk.split("_")[0]
    t = str(int(time.time() * 1000))

    # Request payload expected by mtop.taobao.rate.detaillist.get.
    data_dict = {
        "showTrueCount": False,
        "auctionNumId": str(item_id),
        "pageNo": page_no,
        "pageSize": page_size,
        "rateType": rate_type,
        "searchImpr": "-8",
        "orderType": order_type,
        "expression": "",
        "rateSrc": "pc_rate_list",
    }

    # Compact separators: the signature is computed over this exact string,
    # so it must match what is sent in the "data" query parameter.
    data_json = json.dumps(data_dict, ensure_ascii=False, separators=(",", ":"))

    # Sign the payload (token + timestamp + payload).
    sign = get_sign(token, t, data_json)
    if not sign:
        print("[错误] 签名生成失败")
        return None

    # Query parameters for the mtop gateway.
    params = {
        "jsv": "2.7.5",
        "appKey": "12574478",
        "t": t,
        "sign": sign,
        "api": "mtop.taobao.rate.detaillist.get",
        "v": "6.0",
        "isSec": "0",
        "ecode": "1",
        "timeout": "20000",
        "type": "json",
        "dataType": "json",
        "data": data_json,
    }

    # Browser-like headers so the gateway accepts the request.
    headers = {
        "accept": "*/*",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,ar;q=0.7",
        "cache-control": "no-cache",
        "pragma": "no-cache",
        "referer": "https://item.taobao.com/",
        "sec-ch-ua": '"Microsoft Edge";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Windows"',
        "sec-fetch-dest": "script",
        "sec-fetch-mode": "no-cors",
        "sec-fetch-site": "same-site",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
    }

    try:
        # Default to the previously hard-coded local socks5 proxy to keep
        # existing behavior; callers can pass proxies={} to disable it.
        if proxies is None:
            proxies = {
                "http": "socks5://127.0.0.1:8442",
                "https": "socks5://127.0.0.1:8442",
            }

        response = requests.get(
            url="https://h5api.m.taobao.com/h5/mtop.taobao.rate.detaillist.get/6.0/",
            headers=headers,
            cookies=cookies,
            params=params,
            proxies=proxies,
            timeout=15,
        )
        # Dump the raw response per page for offline debugging.
        with open(f"response_{page_no}.json", "w", encoding="utf-8") as f:
            f.write(response.text)
        print(f"[调试] 请求URL: {response.url}")
        print(f"[调试] 响应状态: {response.status_code}")
        print(f"[调试] 响应内容前200字符: {response.text[:200]}")

        if response.status_code != 200:
            print(f"[错误] HTTP状态码: {response.status_code}")
            return None

        res_json = response.json()
        # mtop signals success through ret[0] == "SUCCESS::调用成功".
        if not res_json.get("ret") or res_json["ret"][0] != "SUCCESS::调用成功":
            print(f"[错误] API调用失败: {res_json.get('ret', 'Unknown error')}")
            return None

        return res_json.get("data", {})

    except requests.exceptions.RequestException as e:
        print(f"[错误] 网络请求失败 - ID: {item_id}, 错误: {e}")
        return None
    except json.JSONDecodeError as e:
        print(f"[错误] JSON解析失败 - ID: {item_id}, 错误: {e}")
        print(f"[调试] 原始响应: {response.text[:500]}")
        return None
    except Exception as e:
        print(f"[错误] 未知错误 - ID: {item_id}, 错误: {e}")
        return None


def get_total_pages(
    item_id: str, cookies: Dict[str, str], page_size: int = 20, rate_type: str = ""
) -> int:
    """
    Return the total number of review pages for a product.

    Fetches page 1 and reads its "totalPage" field; returns 0 when the
    request fails or the field is absent.
    """
    first_page = get_comments_by_id(
        item_id, cookies, page_no=1, page_size=page_size, rate_type=rate_type
    )
    if not first_page:
        return 0
    try:
        return int(first_page["totalPage"])
    except KeyError:
        return 0


def get_all_comments(
    item_id: str,
    cookies: Dict[str, str],
    page_size: int = 20,
    rate_type: str = "",
    max_pages: Optional[int] = None,
) -> List[Dict[str, Any]]:
    """
    Fetch all reviews for a product, page by page.

    Args:
        item_id: Product ID.
        cookies: Taobao cookies (must contain a valid ``_m_h5_tk``).
        page_size: Items per page.
        rate_type: Review filter, same semantics as get_comments_by_id.
        max_pages: Upper bound on pages to fetch; None means fetch all.

    Returns:
        A flat list of review dicts (the "rateList" entries of each page);
        empty list when the page count cannot be determined.
    """
    print(f"[信息] 开始获取商品 {item_id} 的评论...")

    # Determine how many pages exist (costs one extra page-1 request).
    total_pages = get_total_pages(item_id, cookies, page_size, rate_type)
    if total_pages == 0:
        print("[错误] 无法获取总页数")
        return []

    # Apply the caller's page cap, if any.
    if max_pages and max_pages < total_pages:
        total_pages = max_pages

    print(f"[信息] 总共需要获取 {total_pages} 页评论")

    all_comments: List[Dict[str, Any]] = []
    for page_no in range(1, total_pages + 1):
        print(f"[进度] 正在获取第 {page_no}/{total_pages} 页...")

        page_data = get_comments_by_id(
            item_id, cookies, page_no=page_no, page_size=page_size, rate_type=rate_type
        )
        if page_data and "rateList" in page_data:
            all_comments.extend(page_data["rateList"])
            print(f"[成功] 第 {page_no} 页获取到 {len(page_data['rateList'])} 条评论")
        else:
            # Best-effort: a failed page is skipped, not fatal.
            print(f"[警告] 第 {page_no} 页获取失败")

        # Throttle between pages to avoid rate limiting; no need to sleep
        # after the last page.
        if page_no < total_pages:
            time.sleep(1)

    print(f"[完成] 总共获取到 {len(all_comments)} 条评论")
    return all_comments


if __name__ == "__main__":
    # Product ID to scrape; replace with the target item's ID.
    item_id = "732745196167"

    # Example: just query the pagination info.
    # total_pages = get_total_pages(item_id, cookies, page_size=10)
    # print(f"总页数: {total_pages}")

    # Example: fetch only the first page of reviews.
    # comments = get_all_comments(item_id, cookies, page_size=10, max_pages=1)
    # print(f"获取到评论数量: {len(comments)}")

    # Fetch every review (omit max_pages to get all pages).
    all_comments = get_all_comments(item_id, cookies, page_size=10)
