import requests
from bs4 import BeautifulSoup

# Site root; prepended to relative hrefs scraped from search results and images.
base_url = "https://www.medchemexpress.cn"

# Detail-page stylesheet injected verbatim into the generated description HTML
# by get_description(). NOTE: the /* ... */ comments below are part of the
# emitted CSS string (runtime output), so they are left exactly as-is.
protein_style = """
/* 表格整体样式 */
.details_info_tbl {
    width: 100%;
    border-collapse: separate;
    border-spacing: 0;
    margin: 20px 0;
    background: #fff;
    box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
    border-radius: 8px;
    overflow: hidden;
}

/* 表格标题单元格样式 */
.details_info_th {
    background: #f8f9fa;
    color: #2c3e50;
    font-weight: 600;
    padding: 15px 20px;
    text-align: left;
    width: 200px;
    border-bottom: 1px solid #e9ecef;
    font-size: 14px;
}

/* 表格内容单元格样式 */
.details_info_td {
    padding: 15px 20px;
    border-bottom: 1px solid #e9ecef;
    color: #495057;
    font-size: 14px;
    line-height: 1.6;
}

/* 表格行悬停效果 */
.details_info_tbl tr:hover {
    background-color: #f8f9fa;
}

/* 表格内容区域样式 */
.details_info_td div {
    margin: 0;
}

/* 表格内容段落样式 */
.details_info_td p {
    margin: 0 0 10px 0;
}

/* 链接样式 */
.details_info_td a {
    color: #6a4b92;
    text-decoration: none;
    font-weight: 500;
    transition: color 0.2s ease;
}

.details_info_td a:hover {
    color: #8b6ab2;
    text-decoration: underline;
}

/* 氨基酸序列样式 */
#aa-sequence {
    font-family: monospace;
    font-size: 13px;
    line-height: 1.8;
    background: #f8f9fa;
    padding: 15px;
    border-radius: 4px;
    overflow-x: auto;
}

#aa-sequence span {
    display: inline-block;
    margin: 0 2px;
    padding: 2px 4px;
    border-radius: 3px;
    transition: background-color 0.2s ease;
}

#aa-sequence span:hover {
    background-color: #e9ecef;
}

/* 文档部分样式 */
.details_info_doc {
    display: flex;
    flex-wrap: wrap;
    gap: 15px;
}

.icon_pdf_doc {
    display: inline-flex;
    align-items: center;
    padding: 8px 15px;
    background: #f8f9fa;
    border: 1px solid #dee2e6;
    border-radius: 4px;
    color: #495057;
    text-decoration: none;
    transition: all 0.2s ease;
}

.icon_pdf_doc:hover {
    background: #e9ecef;
    border-color: #ced4da;
    color: #212529;
}

/* 批次选择下拉框样式 */
#pdf_batch_no {
    padding: 8px 12px;
    border: 1px solid #ced4da;
    border-radius: 4px;
    background: #fff;
    color: #495057;
    font-size: 14px;
    margin-left: 10px;
}
"""


def get_code(code):
    """货号处理"""
    try:
        return code.split("-")[0] + "-" + code.split("-")[1]
    except IndexError:
        print(f"货号格式错误{code}")


def search_procode(procode) -> str | None:
    """Search the site for a catalog number and return the product detail URL.

    Args:
        procode: Catalog number to search for (matched exactly against the
            link text of each result entry).

    Returns:
        Absolute product-detail URL, or None when there are no results or no
        entry whose link text equals ``procode``.
    """
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Referer": "https://www.medchemexpress.cn/",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
    }
    url = "https://www.medchemexpress.cn/search.html"
    params = {
        "q": procode,
    }
    # timeout prevents the scraper hanging forever on a stalled connection
    response = requests.get(url, headers=headers, params=params, timeout=30)
    soup = BeautifulSoup(response.text, "html.parser")
    result_dom = soup.find("ul", class_="srh_rst_list_con")
    if result_dom is None:
        # No results container on the page at all (e.g. zero hits)
        return None
    # Each <dt class="s_pro_list_cat"> holds one result's product link
    for dt in result_dom.find_all("dt", class_="s_pro_list_cat"):
        a_tag = dt.find("a")
        if a_tag and a_tag.get_text(strip=True) == procode:
            page_url = base_url + a_tag["href"]
            print(page_url)
            return page_url
    return None


def req_pro_info(page_url) -> BeautifulSoup:
    """Fetch a product detail page and return it parsed as a soup.

    Args:
        page_url: Absolute URL of the product detail page.

    Returns:
        BeautifulSoup of the page, parsed with the "lxml" parser
        (matching the original implementation).
    """
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Referer": "https://www.medchemexpress.cn/",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
    }
    # timeout prevents hanging indefinitely; the debug print of the full
    # response body was removed (it flooded stdout with the whole page)
    response = requests.get(page_url, headers=headers, timeout=30)
    soup = BeautifulSoup(response.text, "lxml")
    return soup


def get_sku_info(soup) -> list:
    """Extract in-stock SKU entries from a product detail soup.

    Args:
        soup: Parsed product detail page (as returned by req_pro_info).

    Returns:
        List of dicts with keys "spec", "price" (float, RMB) and
        "delivery_date" (the stock-status cell text). Empty list when the
        price table is missing or no row is "In-stock".
    """
    table_dom = soup.find("table", class_="price_tbl")
    if table_dom is None:
        # Page has no price table (e.g. discontinued product) — nothing to sell
        return []
    tr_dom = table_dom.find_all("tr")

    # First row is the <th> header; data rows start at index 1
    sku_list = []
    for tr in tr_dom[1:]:
        td_dom = tr.find_all("td")

        try:
            # Only keep rows currently in stock
            if td_dom[2].get_text(strip=True) == "In-stock":
                sku_list.append(
                    {
                        "spec": td_dom[0].get_text(strip=True).replace(" ", ""),
                        "price": float(
                            td_dom[1].get_text(strip=True).replace("￥", "")
                        ),
                        "delivery_date": td_dom[2].get_text(strip=True),
                    }
                )
        except IndexError:
            # Row with fewer than 3 cells (e.g. a spanning notice row) — skip it
            print("索引超出范围")
    return sku_list


def get_description(soup) -> str:
    """Build a self-contained description HTML fragment from the detail soup.

    Wraps the page's ``div.details_info`` block (moved out of ``soup``) in a
    ``div.cxsw_description`` together with a <style> tag carrying
    ``protein_style``.

    Args:
        soup: Parsed product detail page. NOTE: the details_info element is
            detached from it by this call.

    Returns:
        Serialized HTML string of the wrapper div. When the page has no
        details_info block, the wrapper contains only the <style> tag.
    """
    # Original details DOM from the product page
    raw_description = soup.find("div", class_="details_info")

    # Build the new wrapper DOM with a single builder for consistency
    builder = BeautifulSoup("", "html.parser")
    div_dom = builder.new_tag("div", attrs={"class": "cxsw_description"})

    style_tag = builder.new_tag("style")
    style_tag.string = protein_style
    div_dom.append(style_tag)
    if raw_description is not None:
        # append() detaches raw_description from the source soup
        div_dom.append(raw_description)

    return str(div_dom)


def get_picture_url(soup) -> list:
    """Collect product image URLs from the detail-page carousel.

    Args:
        soup: Parsed product detail page.

    Returns:
        List of absolute image URLs rebased onto ``base_url``; empty list
        when the carousel is absent. (The original raised NameError in that
        case because the list variable was never bound.)
    """
    picture_url_dom = soup.find("ul", attrs={"class": "carousel-inner carousel-test"})
    if picture_url_dom is None:
        return []

    return [
        base_url + item["src"].replace("//file.medchemexpress.cn", "")
        for item in picture_url_dom.find_all("img")
    ]


def get_pro_code(soup) -> str | None:
    """Extract the product catalog number from the detail-page header.

    Args:
        soup: Parsed product detail page.

    Returns:
        Catalog number with the "目录号: " label stripped, or None when the
        header / dt / span structure is missing.
    """
    code_dom = soup.find("div", attrs={"id": "pro_detail_hd"})
    if code_dom:
        code_block = code_dom.find("dt")
        if code_block:
            span = code_block.find("span")
            if span:
                # Label prefix "目录号: " is part of the page text — strip it
                return span.get_text(strip=True).replace("目录号: ", "")
    return None


def get_pro_name(soup) -> str | None:
    """Extract the product name from the detail-page header.

    Args:
        soup: Parsed product detail page.

    Returns:
        The <strong> text inside #pro_detail_hd, or None when the header or
        the <strong> tag is missing.
    """
    name_dom = soup.find("div", attrs={"id": "pro_detail_hd"})
    if name_dom:
        strong = name_dom.find("strong")
        if strong:
            return strong.get_text(strip=True)
    return None


def search_process(procode: str) -> list | None:
    """Search a catalog number and return its in-stock SKU price list.

    Args:
        procode: Catalog number to search for.

    Returns:
        Non-empty SKU list when the product exists and is on sale,
        otherwise None.
    """
    url = search_procode(procode)
    if url:  # product exists
        # BUG FIX: get_sku_info expects a parsed soup, not a URL string —
        # fetch and parse the detail page first.
        soup = req_pro_info(url)
        sku_list = get_sku_info(soup)
        if sku_list:  # product is on sale
            return sku_list
    return None


def raw_proinfo(pro_url: str) -> dict | None:
    """Fetch raw MCE product information from a detail-page URL.

    Collects catalog number, name, images, description HTML and SKU list.

    Args:
        pro_url: Product detail-page URL.

    Returns:
        Dict with keys "code", "name", "photos", "desc", "sku_list" when the
        product has at least one in-stock SKU; None otherwise.
    """
    soup = req_pro_info(pro_url)
    sku_list = get_sku_info(soup)
    if not sku_list:  # product not on sale — nothing to report
        return None
    return {
        "code": get_pro_code(soup),
        "name": get_pro_name(soup),
        "photos": get_picture_url(soup),
        # get_description() detaches the details DOM, so call it after the
        # other extractors that read the same soup.
        "desc": get_description(soup),
        "sku_list": sku_list,
    }


if __name__ == "__main__":
    # Smoke test against a known product page.
    # BUG FIX: get_sku_info expects a soup, not a URL — fetch/parse first.
    req_url = "https://www.medchemexpress.cn/antibody/erab-antibody-ya3343.html"
    soup = req_pro_info(req_url)
    sku_list = get_sku_info(soup)
    print(sku_list)
