# 导出 rj 平台有销量商品
# 获取陶术抑制剂分类下所有商品
import requests
from bs4 import BeautifulSoup
import json
import html

# Site root; category hrefs scraped below are relative and get joined onto this.
base_url = "https://www.targetmol.cn/"
# Browser-like headers used for the plain HTML page requests (category listing,
# product detail pages). Copied from a real Chrome session so the site serves
# the normal desktop markup.
# NOTE(review): "referer" points at tsbiochem.com while every request goes to
# targetmol.cn — looks like a copy/paste leftover from another scraper; confirm.
headers = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "max-age=0",
    "priority": "u=0, i",
    "referer": "https://www.tsbiochem.com/",
    "sec-ch-ua": '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"macOS"',
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "same-origin",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
}

# Inline CSS embedded into every exported product-description fragment.
# Fix: the @media block opened below was never closed (13 "{" vs 12 "}"),
# which made browsers swallow everything after it inside the media query;
# the missing closing brace is now added at the end.
style_string = """
.product-introduction {
    font-family: 'Arial', sans-serif;
    max-width: 900px;
    margin: 0 auto;
    padding: 20px;
    border-radius: 8px;
    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
  }
  
.text-red {
color: #e53935;
font-size: 26px;
display: block;
margin: 20px 0 10px;
padding-bottom: 5px;
border-bottom: 2px solid #e53935;
}

.compound-title {
margin-top: 30px;
}

table {
width: 100%;
border-collapse: collapse;
margin-bottom: 20px;
background-color: white;
border-radius: 4px;
overflow: hidden;
}

td {
padding: 12px 15px;
border: 1px solid #ddd;
vertical-align: top;
}

tr td:first-child {
width: 25%;
background-color: #f2f2f2;
font-weight: bold;
}

tr:hover {
background-color: #f5f5f5;
}

sub {
font-size: 0.8em;
vertical-align: sub;
}

#bioactivity, #chemical, #storage {
scroll-margin-top: 20px;
}

@media (max-width: 768px) {
.product-introduction {
    padding: 15px;
}

td {
    padding: 8px 10px;
}

tr td:first-child {
    width: 35%;
}
}
"""


def get_cate_list() -> list:
    """Fetch the paths of every product category from the all-pathways page.

    Returns:
        list: de-duplicated category hrefs (relative URLs), in first-seen
        order; empty list if the expected page region is missing.
    """
    url = "https://www.targetmol.cn/all-pathways"
    response = requests.get(url, headers=headers)
    # Fail fast on an HTTP error instead of silently parsing an error page.
    response.raise_for_status()

    soup = BeautifulSoup(response.text, "html.parser")
    dom_block = soup.find("div", attrs={"role": "region"})
    if dom_block is None:
        # Layout changed or the request was blocked — nothing to parse.
        return []

    # Collect the href of every category card link inside the region.
    cate_hrefs = []
    for link in dom_block.find_all("a", attrs={"class": "results-content-name-card"}):
        href = link.get("href")
        if href:
            cate_hrefs.append(href)

    # dict.fromkeys de-duplicates while keeping a deterministic first-seen
    # order, unlike list(set(...)) whose order varies between runs.
    return list(dict.fromkeys(cate_hrefs))


# def get_pro_list(cate_url: str) -> list:
#     """获取商品列表

#     Args:
#         cate_url (str): 商品分类url
#     """
#     _temp_url = []
#     url = base_url + cate_url
#     response = requests.get(url, headers=headers)

#     soup = BeautifulSoup(response.text, "html.parser")

#     dom_block = soup.find("div", attrs={"id": "results-product"})

#     # 获取所有 a 标签
#     links = dom_block.find_all("a", attrs={"class": "name"})

#     for link in links:
#         href = link.get("href")
#         if href:
#             _temp_url.append(href)

#     return _temp_url


def parse_nuxt_data(flat_data):
    """Parse the flattened Nuxt.js payload structure.

    In the __NUXT_DATA__ format, ``flat_data[1]`` maps top-level keys to
    integer indices into ``flat_data``; each referenced value may itself
    contain further index references (resolved recursively).

    Args:
        flat_data: the JSON list extracted from the __NUXT_DATA__ script tag

    Returns:
        dict: resolved top-level key/value mapping, or an error-description
        string when the structure is not the expected flattened list.
    """
    if not isinstance(flat_data, list) or len(flat_data) < 2:
        return "数据结构不符合预期"

    # flat_data[0] holds type metadata — not needed for value resolution.
    # The second element maps each top-level key to its value's index.
    index_map = flat_data[1]

    result = {}
    for key, index in index_map.items():
        if isinstance(index, int) and 0 <= index < len(flat_data):
            result[key] = resolve_value(flat_data, index)
        else:
            # Keep the original best-effort behavior: record, don't raise.
            result[key] = f"索引 {index} 无效"

    return result


def resolve_value(flat_data, index):
    """Recursively resolve one value in the flattened Nuxt.js payload.

    Integer members of dicts/lists are treated as references into
    ``flat_data`` and resolved recursively; anything else is returned as-is.

    Args:
        flat_data: the complete flattened data list
        index: index of the value to resolve

    Returns:
        The resolved value, or an error-description string when ``index``
        is outside ``flat_data``.
    """
    # Guard the full range: a negative index would silently wrap around via
    # Python's negative indexing, which is inconsistent with the 0 <= v
    # checks used for nested references below.
    if not 0 <= index < len(flat_data):
        return f"索引 {index} 超出范围"

    value = flat_data[index]

    if isinstance(value, dict):
        # Dict values that are in-range ints are references — resolve them.
        return {
            k: resolve_value(flat_data, v)
            if isinstance(v, int) and 0 <= v < len(flat_data)
            else v
            for k, v in value.items()
        }

    if isinstance(value, list):
        # Same reference rule for list items.
        return [
            resolve_value(flat_data, item)
            if isinstance(item, int) and 0 <= item < len(flat_data)
            else item
            for item in value
        ]

    # Scalars (str, int used as data, None, bool, float) are returned as-is.
    return value


def extract_and_parse_nuxt_data(html_content):
    """Extract the __NUXT_DATA__ JSON island from an HTML page and parse it.

    Args:
        html_content: full HTML page text

    Returns:
        Parsed data structure, or an error-description string on failure
        (missing tag, empty tag, or malformed JSON).
    """
    soup = BeautifulSoup(html_content, "html.parser")
    script_tag = soup.find("script", id="__NUXT_DATA__")

    if not script_tag:
        return "未找到 __NUXT_DATA__ 标签"

    # An empty <script> tag yields .string == None; json.loads(None) would
    # raise TypeError (uncaught by the original code), so guard it here.
    if not script_tag.string:
        return "JSON 解析错误"

    try:
        json_data = json.loads(script_tag.string)
    except json.JSONDecodeError:
        return "JSON 解析错误"
    return parse_nuxt_data(json_data)


def get_pro_list(cate_info: dict) -> list:
    """Fetch every product in one category via the kind-search API.

    Args:
        cate_info (dict): category info; must contain "kindId" and "kindNum"
            (total number of products in the category).

    Returns:
        list: all product entries ("pros") collected across every page.
    """
    _temp_list = []
    url = "https://www.targetmol.cn/api/website2/web/search/kind"
    # 48 products per page; ceiling division gives the page count.
    # Bug fix: the original used `kindNum / 48` when kindNum was an exact
    # multiple of 48, producing a float and making range() raise TypeError.
    total = (cate_info["kindNum"] + 47) // 48

    # The headers never change between pages — build them once, not per loop.
    headers = {
        "accept": "application/json",
        "accept-language": "zh-CN",
        # NOTE(review): the trailing ";" in this header name looks like a
        # "copy as cURL" artifact — kept byte-identical; confirm intent.
        "authorization;": "",
        "content-type": "application/json",
        "origin": "https://www.targetmol.cn",
        "priority": "u=1, i",
        "sec-ch-ua": '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
    }

    for page in range(1, total + 1):
        print(f"正在获取第 {page} 页数据")
        # Compact separators match the payload shape the site's own client sends.
        payload = json.dumps(
            {"page": page, "kindId": cate_info["kindId"]}, separators=(",", ":")
        )
        response = requests.post(url, headers=headers, data=payload)
        _temp_list.extend(response.json()["data"]["pros"])

    return _temp_list


def get_prodes(pro_url: str) -> str:
    """Fetch a product detail page and return its description as styled HTML.

    Args:
        pro_url (str): product path relative to the site root
            (e.g. "/compound/xxxx")

    Returns:
        str: a ``<div class="cxsw_description">`` fragment containing an
        inline ``<style>`` block followed by the page's product-description
        markup, with Vue scoped-style attributes stripped.
    """
    # API-style browser headers captured from a real session.
    # NOTE(review): x-login-ip / x-login-uuid are hard-coded session values —
    # they may expire or be account-specific; confirm they are still required.
    headers = {
        "accept": "application/json",
        "accept-language": "zh-CN",
        "authorization;": "",
        "content-type": "application/json",
        "origin": "https://www.targetmol.cn",
        "priority": "u=1, i",
        "referer": "https://www.targetmol.cn/compound/incb_000928",
        "sec-ch-ua": '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"macOS"',
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
        "x-login-country": "CN",
        "x-login-ip": "218.19.138.182",
        "x-login-timezone": "UTC",
        "x-login-uuid": "e784abd1-8ae8-4691-a99a-0bedf73ae24b",
    }

    url = "https://www.targetmol.cn" + pro_url
    response = requests.get(url, headers=headers)

    soup = BeautifulSoup(response.text, "html.parser")
    all_tags = soup.find_all()

    # Walk every tag and strip Vue's scoped-style attributes (data-v-*)
    # so the exported markup is clean of framework internals.
    for tag in all_tags:
        # Snapshot the attribute dict: we delete keys while iterating.
        attrs = dict(tag.attrs)

        for attr in list(attrs.keys()):
            if attr.startswith("data-v-"):
                del tag[attr]

    # First (and presumably only) description container on the page.
    # "<!-- -->" are Vue placeholder comments — drop them from the markup.
    desc_block = soup.find_all("div", class_="product-description")[0]
    raw_description = str(desc_block).replace("<!-- -->", "")

    # Build the wrapper: <div class="cxsw_description"><style>…</style>…</div>
    _description = BeautifulSoup("", "html.parser")
    div_dom = _description.new_tag("div", attrs={"class": "cxsw_description"})
    style_tag = _description.new_tag("style")
    style_tag.string = style_string
    div_dom.append(style_tag)
    # NOTE(review): appending a plain str makes BeautifulSoup store it as an
    # escaped NavigableString ("<" becomes "&lt;"); the html.unescape() below
    # appears to rely on undoing exactly that so the markup survives the
    # round-trip — confirm it doesn't also unescape entities that were
    # intentionally escaped in the source page.
    div_dom.append(raw_description)

    return html.unescape(str(div_dom))


def raw_proinfo(pro_item: dict) -> dict:
    """Normalize one raw product entry into the export record shape.

    Args:
        pro_item (dict): raw product item as returned by the kind-search API

    Returns:
        dict: record with name, product code, CAS number, image URL,
        description HTML, and the SKUs that carry a positive price.
    """
    detail_path = f'/{pro_item["route"]}/{pro_item["jumpurl"]}'

    # Keep only the SKUs that actually have a price; default delivery
    # lead time is "30" when the API omits it.
    priced_skus = [
        {
            "spec": entry["standard"],
            "price": entry["price"],
            "delivery": entry.get("delivery", "30"),
        }
        for entry in pro_item["standards"]
        if entry["price"] > 0
    ]

    return {
        "name": pro_item["name"],
        "code": pro_item["originalNo"],
        "dangerousInfo": {"casNo": pro_item["casNo"]},
        "photos": "https://cdn.targetmol.cn/" + pro_item["pathIcon"],
        "desc": get_prodes(detail_path),
        "sku": priced_skus,
    }
