from DrissionPage import Chromium
from lxml import etree
import re
import htmlmin

# from logger.main import setup_logger, logger

# Set up logging (currently disabled)
# setup_logger()

# Connect to the browser: DrissionPage attaches to a running Chromium
# instance (or launches one) and we work in the most recently used tab.
# NOTE(review): this runs at import time — importing this module has the
# side effect of opening/attaching a browser.
browser = Chromium()
page = browser.latest_tab


def serch_procode(procode):
    """Search the site for a product and return its detail-page URL.

    Loads the search-result page for *procode*, scans every result entry
    and returns the link whose catalogue number matches exactly.

    Args:
        procode (str): product catalogue number to search for.

    Returns:
        str | None: absolute product-detail URL, or None when no entry
        matches (a "[<code>] No search result" line is printed).
    """
    page.listen.start('search.html')

    search_url = f'https://www.medchemexpress.cn/search.html?q={procode}'
    page.get(search_url)
    search_html = page.html

    # One result entry per <li>; group 1 is the relative link, group 2
    # the catalogue number (possibly wrapped in <em> highlight tags).
    teq = re.compile(
        r'<li>.*?<dl>.*?<a href="(.*?)">(.*?)</a>.*?</dl>.*?</li>', re.S)

    for item in teq.finditer(search_html):
        # Strip the search-term highlighting before comparing codes.
        code = item.group(2).replace("<em>", "").replace("</em>", "")
        if code == procode:
            return 'https://www.medchemexpress.cn' + item.group(1)

    # No entry matched the exact catalogue number.
    print(f"[{procode}] No search result")
    return None

def get_proinfo(prourl):
    """Fetch a product detail page and return its minified HTML.

    The minified HTML is also dumped to ``prodes.html`` for debugging.

    Args:
        prourl: absolute URL of the product detail page.

    Returns:
        str: minified HTML source of the page.
    """
    print(prourl)
    print(prourl.split('/')[-1])

    page.get(prourl)
    raw_html = page.html
    print("ok")

    # Collapse whitespace/attribute quoting so downstream regexes can
    # rely on a single canonical form.
    minified = htmlmin.minify(raw_html)
    with open("prodes.html", "w", encoding="utf-8") as f:
        f.write(minified)
    return minified


def extract_content(html_content):
    """Extract the product-description section from a detail page.

    The description sits between two fixed marker comments in the page
    source; all spaces and newlines are removed from the extracted span.

    Args:
        html_content: full (minified) HTML of a product page.

    Returns:
        The description HTML with spaces/newlines stripped, or None when
        the marker comments are not present.
    """
    marker = re.compile("<!-- 20190731 wdf test -->(.*?)<!--end-->", re.DOTALL)
    found = marker.search(html_content)
    if found is None:
        return None
    # Drop all whitespace so later patterns can match a canonical form.
    section = found.group(1)
    return section.replace(" ", "").replace("\n", "")


def extract_prices(matched_content, code):
    """Extract specification/price pairs from the minified price table.

    Args:
        matched_content (str): minified product HTML (output of
            ``htmlmin.minify``; attribute quotes are stripped, which is
            why the pattern below matches ``class=pro_price_1`` unquoted).
        code (str): product catalogue number, used only in log messages.

    Returns:
        list[dict] | None: one ``{"spec": ..., "price": ...}`` dict per
        table row, or None when no row matches or a row fails to parse.
    """
    # One table row per match: group 1 is the specification cell,
    # group 2 the price cell (still carrying "￥" and "<br>" noise).
    teq = re.compile(
        r'<td class=pro_price_1>(.*?)</td> <td class=pro_price_2> (.*?) </td> <td class=pro_price_3>.*?</td> <td class=pro_price_4>.*?<option selected=selected>0</option>'
    )

    temp_list = []

    try:
        for match in teq.finditer(matched_content):
            # Normalise the spec cell: drop spaces and &nbsp; entities.
            product_spec = match.group(1).replace(" ", "").replace("&nbsp;", "").strip()
            # Normalise the price cell: drop line breaks and the currency sign.
            product_price = match.group(2).strip().replace("<br>", "").replace("￥", "").strip()
            # An overlong "spec" means leftover markup slipped into the
            # cell; keep only the text after the last tag.
            if len(product_spec) > 10:
                product_spec = product_spec.split(">")[-1]

            temp_list.append({
                "spec": product_spec,
                "price": product_price,
            })
    except Exception:
        # Best-effort parsing: any malformed row aborts extraction.
        print(f"[{code}] Error in price result")
        return None

    if not temp_list:
        print(f"[{code}] No price result")
        return None
    return temp_list