import os
import requests
from bs4 import BeautifulSoup

# Directory containing this script; used to locate bundled resource files.
model_path = os.path.dirname(os.path.abspath(__file__))
# HTML style template that gen_prodes() prepends to the generated description.
style_file = os.path.join(model_path, "index.html")


# Root URL of the CWBio storefront; prefixed onto relative hrefs/img srcs.
base_url = "https://www.cwbio.com"
# Browser-like headers so the site serves the normal HTML pages.
headers = {
    "origin": "https://www.cwbio.com",
    "referer": "https://www.cwbio.com/",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36",
}


def search_product(keyword: str) -> "BeautifulSoup | None":
    """Search the CWBio site for *keyword*.

    Args:
        keyword: Search term, e.g. a catalog number like "CW0580".

    Returns:
        BeautifulSoup of the search-result page, or None when the site
        reports no matching products ("暂无数据" = "no data yet").
    """
    # Pass the keyword via params so requests percent-encodes it properly
    # (the original f-string interpolation sent it raw). A timeout prevents
    # the call from hanging forever on a stalled connection.
    response = requests.get(
        f"{base_url}/search",
        params={"keyword": keyword},
        headers=headers,
        timeout=30,
    )
    print(response.status_code)
    soup = BeautifulSoup(response.text, "html.parser")
    # Look the result container up once instead of twice.
    result = soup.find("div", class_="product_result")
    if result is not None and result.text == "暂无数据":
        print("暂无数据")
        return None
    return soup


def _select_text(item, selector: str) -> "str | None":
    """Return the stripped text of the first match for *selector*, or None."""
    elem = item.select_one(selector)
    return elem.text.strip() if elem else None


def extract_product_info(soup: "BeautifulSoup") -> list:
    """Extract product entries from a search-result page.

    Args:
        soup: BeautifulSoup of the search-result page (may be None).

    Returns:
        list[dict]: one dict per product; keys are only present when the
        corresponding element was found on the page.
    """
    if not soup or not soup.find("div", class_="search_contList"):
        return []

    products = []
    for item in soup.find_all("div", class_="search_contList_list"):
        product = {}

        # Product name and detail-page link.
        link = item.select_one(".search_contList_listTop dd h3 a")
        if link:
            product["name"] = link.text.strip()
            product["url"] = base_url + link.get("href", "")

        # Short description text.
        desc = _select_text(item, ".search_contList_listTop dd p a")
        if desc is not None:
            product["description"] = desc

        # Thumbnail image (site serves relative paths).
        img = item.select_one(".search_contList_listTop dt img")
        if img:
            product["image"] = base_url + img.get("src", "")

        # Catalog number is the first <li> of the info list.
        code = _select_text(
            item, ".search_contList_listBottom_info_det li:nth-child(1)"
        )
        if code is not None:
            product["code"] = code

        # Specification is the second <li>; normalized to compact lowercase.
        spec = _select_text(
            item, ".search_contList_listBottom_info_det li:nth-child(2)"
        )
        if spec is not None:
            product["specification"] = spec.replace(" ", "").lower()

        # Discounted and original prices (either may be missing).
        discount = _select_text(
            item, ".search_contList_listBottom_info_det_discount"
        )
        if discount is not None:
            product["discount_price"] = discount
        original = _select_text(
            item, ".search_contList_listBottom_info_det_original_price"
        )
        if original is not None:
            product["original_price"] = original

        # Delivery lead time is the fourth <li>.
        delivery = _select_text(
            item, ".search_contList_listBottom_info_det li:nth-child(4)"
        )
        if delivery is not None:
            product["delivery"] = delivery

        # Product/SKU ids live as attributes on the add-to-cart element.
        cart = item.select_one(".search_contList_listBottom_det_cart")
        if cart:
            product["id"] = cart.get("data_id", "")
            product["sku_id"] = cart.get("data_sku_id", "")

        products.append(product)

    return products


def extract_product_detail(url: str) -> "BeautifulSoup | None":
    """Fetch a product detail page and return its main detail section.

    Relative image sources and manual-download links inside the section are
    rewritten to absolute URLs so the fragment renders outside the site.

    Args:
        url: Absolute URL of the product detail page.

    Returns:
        The "product_detail_contBottom_main" tag, or None when the page has
        no such section (the original crashed with AttributeError here).
    """
    # Timeout prevents hanging forever on a stalled connection.
    response = requests.get(url, headers=headers, timeout=30)
    soup = BeautifulSoup(response.text, "html.parser")
    prodes = soup.find("div", class_="product_detail_contBottom_main")
    if prodes is None:
        return None

    # Prefix base_url onto every image src; skip <img> tags without one
    # (the original raised KeyError on a missing src attribute).
    for img in prodes.find_all("img"):
        if img.get("src"):
            img["src"] = base_url + img["src"]

    # Absolutize both download buttons; either may be absent on some pages.
    for btn_class in ("download_btn", "icon_download_btn"):
        btn = prodes.find("a", class_=btn_class)
        if btn and btn.get("href"):
            btn["href"] = base_url + btn["href"]

    return prodes


def gen_prodes(soup: "BeautifulSoup") -> str:
    """Render a product-detail fragment into a self-contained description.

    Wraps the shared style template (index.html) and the detail soup in a
    <div class="cxsw_description"> container.

    Args:
        soup: Tag returned by extract_product_detail().

    Returns:
        str: HTML markup of the combined description block.
    """
    # Use a context manager so the template file handle is always closed
    # (the original open(...).read() leaked the handle).
    with open(style_file, "r", encoding="utf-8") as fh:
        style_soup = BeautifulSoup(fh.read(), "html.parser")

    template = BeautifulSoup("<div class='cxsw_description'></div>", "html.parser")
    desc = template.find("div", class_="cxsw_description")
    desc.append(style_soup)
    desc.append(soup)
    return str(desc)



if __name__ == "__main__":
    # Example usage: search by catalog number and print every hit.
    # soup = search_product("CW0580")
    # if soup:
    #     products = extract_product_info(soup)
    #     for product in products:
    #         print(f"商品名称: {product.get('name')}")
    #         print(f"商品描述: {product.get('description')}")
    #         print(f"商品货号: {product.get('code')}")
    #         print(f"商品规格: {product.get('specification')}")
    #         print(f"促销价格: {product.get('discount_price')}")
    #         print(f"原始价格: {product.get('original_price')}")
    #         print(f"交货周期: {product.get('delivery')}")
    #         print(f"商品链接: {product.get('url')}")
    #         print(f"商品图片: {product.get('image')}")
    #         print(f"商品ID: {product.get('id')}")
    #         print("=" * 50)

    # Fetch one product's detail section and render it to a local HTML file.
    soup = extract_product_detail("https://www.cwbio.com/product/detail/id/10210")
    with open("res.html", "w", encoding="utf-8") as f:
        # gen_prodes() already returns a str; the extra str() was redundant.
        f.write(gen_prodes(soup))
