import json
import re

from bs4 import BeautifulSoup
import requests

base_url = "https://bio.vazyme.com"
img_base_url = "https://omo-oss-image.thefastimg.com/"
headers = {
    "accept": "application/json, text/javascript, */*; q=0.01",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "no-cache",
    "content-type": "application/json;charset=UTF-8",
    "instance": "NEW2023111517561483078",
    "origin": "https://bio.vazyme.com",
    "pragma": "no-cache",
    "priority": "u=1, i",
    "referer": "https://bio.vazyme.com/",
    "sec-ch-ua": '"Not)A;Brand";v="8", "Chromium";v="138", "Google Chrome";v="138"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"macOS"',
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36",
    "x-requested-with": "XMLHttpRequest",
}


def search_request(keywords: str) -> list | None:
    """Query Vazyme's full-site search API for *keywords*.

    Sends the Elasticsearch-style payload the site frontend uses, phrase-
    matching the keywords against product name, two attribute fields,
    label fields and the summary.

    Args:
        keywords: phrase to search for (product name or product code).

    Returns:
        The raw result list from the API, or None when the request fails,
        nothing matched, or the response body has an unexpected shape.
    """
    url = "https://bio.vazyme.com/nportal/fwebapi/cms/lowcode/totalStationSearch"
    data = {
        "from": 0,
        "size": 12,
        "query": {
            "function_score": {
                "min_score": 1e-06,
                "query": {
                    "bool": {
                        "should": [
                            {
                                "bool": {
                                    "filter": [
                                        {"term": {"_index": "nportal_product"}},
                                        {"term": {"TENANT_ID": 345343}},
                                        {"term": {"appId": "143150160001"}},
                                        {"term": {"TID": "product"}},
                                    ],
                                    "should": [
                                        {
                                            "match_phrase": {
                                                "productName.keyword": keywords
                                            }
                                        },
                                        {
                                            "match_phrase": {
                                                "attributeObject.1217537188328968192": keywords
                                            }
                                        },
                                        {
                                            "match_phrase": {
                                                "attributeObject.1217037999481180160": keywords
                                            }
                                        },
                                        {"match_phrase": {"preNewLabelName": keywords}},
                                        {"match_phrase": {"preNewLabel": keywords}},
                                        {"match_phrase": {"summary": keywords}},
                                    ],
                                }
                            }
                        ]
                    }
                },
            }
        },
    }
    payload = json.dumps(data, separators=(",", ":"))
    try:
        # timeout so a stalled connection cannot hang the caller forever
        response = requests.post(url, headers=headers, data=payload, timeout=15)
    except requests.RequestException:
        # network failure is treated like "no result" so callers can fall
        # back to the next keyword (see search_product)
        return None
    if response.status_code != 200:
        return None
    try:
        result = response.json()["data"]["result"]
        if result["page"]["totalCount"] == 0:
            return None
        return result["list"]
    except (ValueError, KeyError, TypeError):
        # malformed JSON or an unexpected response schema
        return None


def search_product(name: str, procode: str) -> list | None:
    """Search by product name first, then fall back to the product code.

    Args:
        name: product name keyword.
        procode: product code keyword used when the name yields nothing.

    Returns:
        The first non-empty result list, or None when both lookups miss.
    """
    for keyword in (name, procode):
        result = search_request(keyword)
        if result:
            return result
    return None


def extract_prodes_url(search_list: list) -> str:
    """Build the absolute product-detail URL from the first search hit."""
    first_hit = search_list[0]
    return f"{base_url}{first_hit['href']}"


def extract_img_url(search_res: list) -> list:
    """Collect absolute image URLs for the first search hit.

    Returns:
        A list of ``{"photo": <absolute image URL>}`` dicts.
    """
    photos = []
    for image in search_res[0]["images"]:
        photos.append({"photo": img_base_url + image["imageUrl"]})
    return photos


def extract_english_with_numbers(text):
    """Keep only the part of *text* after the last run of Chinese characters.

    Used to drop a Chinese brand prefix from a mixed-language product name,
    leaving the English/numeric portion (special symbols included).

    Args:
        text: input string, possibly mixing Chinese and Latin text.

    Returns:
        The whitespace-stripped tail following the final Chinese character,
        or the original string unchanged when it contains no Chinese.
    """
    cut = None
    # remember where the last run of CJK ideographs ends
    for match in re.finditer(r"[\u4e00-\u9fff]+", text):
        cut = match.end()
    if cut is None:
        return text  # nothing to strip
    return text[cut:].strip()


def extract_prodes(url: str) -> str | None:
    """Fetch a product page and return its cleaned description HTML.

    Downloads *url*, isolates the active description container, removes a
    known "upgraded product" advert paragraph, two auxiliary layout
    containers, and a hidden form input, then returns the remaining HTML.

    Args:
        url: absolute product-detail page URL.

    Returns:
        The description container's HTML as a string, or None when the
        request fails or the page has no description container.
    """
    # timeout so a stalled connection cannot hang the caller forever
    response = requests.get(url, headers=headers, timeout=15)
    if response.status_code != 200:
        return None

    soup = BeautifulSoup(response.text, "html.parser")
    prodes = soup.find("div", class_="p_content content-box active")
    if prodes is None:
        # layout changed or product has no description section
        return None

    # Drop the advert paragraph if present (original crashed with
    # AttributeError when the wrapper existed but held no <p>).
    paragraph_dom = prodes.find(
        "div", class_="reset_style js-reset_style js-adapMobile"
    )
    if paragraph_dom:
        p_dom = paragraph_dom.find("p")
        if p_dom and "升级产品" in p_dom.get_text():
            p_dom.decompose()

    # Strip auxiliary layout containers and the hidden input.
    for css_class in ("e_container-76 s_layout", "e_container-83 s_layout"):
        container = soup.find(class_=css_class)
        if container:
            container.decompose()
    hidden_input = soup.find("input", type="hidden")
    if hidden_input:
        hidden_input.decompose()

    return str(prodes)


def process_data(proname: str, procode: str):
    """Resolve a product's description HTML and image list.

    Strips the Chinese brand prefix from *proname*, searches by the cleaned
    name (falling back to *procode*), then scrapes the product page.

    Args:
        proname: raw product name, possibly prefixed with Chinese text.
        procode: product code used as a fallback search keyword.

    Returns:
        A ``(description_html, image_dicts)`` tuple on success, else None.
    """
    cleaned_name = extract_english_with_numbers(proname)
    print("proname", cleaned_name)
    search_list = search_product(cleaned_name, procode)
    if not search_list:
        return None
    img_list = extract_img_url(search_list)
    detail_url = extract_prodes_url(search_list)
    description = extract_prodes(detail_url)
    if not description:
        return None
    return str(description), img_list


if __name__ == "__main__":
    # FIX: process_data requires both a name and a product code; the original
    # call passed only the name and raised TypeError. An empty code keeps the
    # name-based lookup as the sole search path.
    if prodes := process_data("诺唯赞ChamQ SYBR qPCR Master Mix", ""):
        print(prodes)
        # with open("prodes.html", "w", encoding="utf-8") as f:
        #     f.write(prodes)