from utils.convert import (
    header_placeholder,
    add_ad_service,
    footer_placeholder,
    rj_imglist,
)
from bs4 import BeautifulSoup
from bs4 import MarkupResemblesLocatorWarning
import warnings

warnings.filterwarnings("ignore", category=MarkupResemblesLocatorWarning)


def process_des(html):
    """Normalize a product-description HTML fragment.

    Strips literal empty ``<img src=""/>`` placeholder lines, parses the
    remainder with BeautifulSoup, and delegates to
    ``update_service_dom`` to rebuild the fragment with the ad-service
    header/footer wrappers.

    Args:
        html (str): Raw product-description HTML.

    Returns:
        str: The rebuilt HTML string.
    """
    # Drop empty image placeholders before parsing.
    content = html.replace('<img src=""/>\n', "")
    soup = BeautifulSoup(content, "html.parser")
    return update_service_dom(soup)


def update_service_dom(soup):
    """Rebuild the product-detail DOM with fresh ad-service wrappers.

    If the soup already contains ad-service blocks (``cxsw_service``),
    those blocks and the header/footer placeholder divs are removed so
    the fragment can be re-wrapped cleanly.  If no ``cxsw_description``
    container exists, a new one is created around the soup together
    with a fixed gallery of SAB detail images.

    Args:
        soup (BeautifulSoup): Parsed product-description DOM; modified
            in place when old ad/placeholder nodes are removed.

    Returns:
        str: ``add_ad_service() + header_placeholder() + body
        + footer_placeholder()`` as a single HTML string.
    """

    def _remove_all(css_class):
        # Remove every <div> carrying the given class from the tree.
        for tag in soup.find_all("div", attrs={"class": css_class}):
            tag.decompose()

    # Existing ads found: strip the old service blocks and the
    # header/footer placeholders before re-wrapping.
    if soup.find("div", attrs={"class": "cxsw_service"}):
        _remove_all("cxsw_service")
        _remove_all("cxsw_header_placeholder")
        _remove_all("cxsw_footer_placeholder")

    if soup.find("div", attrs={"class": "cxsw_description"}):
        # Already wrapped in a description container — reuse as-is.
        body = str(soup)
    else:
        # Build a new cxsw_description wrapper around the soup plus a
        # fixed gallery of SAB detail images.
        builder = BeautifulSoup("", "html.parser")
        div_dom = builder.new_tag("div", attrs={"class": "cxsw_description"})
        img_dom = builder.new_tag("div")
        image_urls = [
            "https://bio-mercury-1316961827.cos.ap-guangzhou.myqcloud.com/photos/sab/SAB-1.jpg",
            "https://bio-mercury-1316961827.cos.ap-guangzhou.myqcloud.com/photos/sab/SAB-2.jpg",
            "https://bio-mercury-1316961827.cos.ap-guangzhou.myqcloud.com/photos/sab/SAB-3.jpg",
            "https://bio-mercury-1316961827.cos.ap-guangzhou.myqcloud.com/photos/sab/SAB-4.jpg",
        ]
        for url in image_urls:
            img_dom.append(builder.new_tag("img", attrs={"src": url}))

        div_dom.append(soup)
        div_dom.append(img_dom)
        body = str(div_dom)

    return add_ad_service() + header_placeholder() + body + footer_placeholder()


# Fixed gallery of SAB main product photos; consumed by gen_updata()
# to replace pro["photos"] via rj_imglist().
img_list = [
    "https://bio-mercury-1316961827.cos.ap-guangzhou.myqcloud.com/photos/sab/SAB-main-5.png",
    "https://bio-mercury-1316961827.cos.ap-guangzhou.myqcloud.com/photos/sab/SAB-main-1.png",
    "https://bio-mercury-1316961827.cos.ap-guangzhou.myqcloud.com/photos/sab/SAB-main-2.png",
    "https://bio-mercury-1316961827.cos.ap-guangzhou.myqcloud.com/photos/sab/SAB-main-3.png",
    "https://bio-mercury-1316961827.cos.ap-guangzhou.myqcloud.com/photos/sab/SAB-main-4.png",
]


def gen_updata(pro: dict) -> dict:
    """Prepare a product record for upload, mutating it in place.

    Replaces the photo list with the fixed SAB gallery, normalizes the
    description HTML, and drops the per-SKU ``differentPrices`` field.

    Args:
        pro: Product record; expected to contain "desc" and "skuInfos".

    Returns:
        dict: The same (mutated) ``pro`` object.
    """
    pro["photos"] = rj_imglist(img_list)
    pro["desc"] = process_des(pro["desc"])

    for sku in pro["skuInfos"]:
        # pop() tolerates SKUs that lack the key; the original `del`
        # raised KeyError on any SKU missing "differentPrices".
        sku.pop("differentPrices", None)
    return pro
