import json
import os
import time
from bs4 import BeautifulSoup
import requests

# Directory containing this module; style.html is expected to live alongside it.
model_path = os.path.dirname(os.path.abspath(__file__))
style_file = os.path.join(model_path, "style.html")


# Base site URL, prepended to the relative hrefs/srcs scraped from pages.
base_url = "https://www.selleck.cn"
# Default request headers for plain requests.get calls.
# NOTE(review): "heardes" is a typo for "headers"; it is referenced throughout
# this module, so renaming it requires updating every call site at once.
heardes = {
    "referer": "https://www.selleck.cn/",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36",
}
# Shared session so the search-token GET and the search POST share cookies.
# The headers are duplicated (not aliased to `heardes`) — note that
# get_search_html mutates heardes["referer"], which this copy is isolated from.
session = requests.Session()
session.headers = {
    "referer": "https://www.selleck.cn/",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36",
}


def click_next_page(tab1):
    """Advance to the next page of results.

    Locates the pagination container on *tab1* (a browser-tab object with a
    DrissionPage-style ``ele`` API), then clicks the last child of the element
    whose text equals ``zen_page`` — assumed to be the "next page" control.
    """
    pagination = tab1.ele(
        "xpath://*[@id='reactRoot']/div[1]/div/div[2]/div/div/div[2]"
    )
    # Last child of the pager widget is the "next" button.
    pagination.ele(".=zen_page").child(-1).click()


def extract_page_data(raw_data: str) -> list:
    """Parse a JSON response body and return its product list.

    The original annotation said ``dict``, but ``json.loads`` requires text —
    the parameter is the raw JSON string of an API response.

    Args:
        raw_data: JSON text shaped like ``{"data": {"products": [...]}}``.

    Returns:
        The list stored under ``data.products``.

    Raises:
        json.JSONDecodeError: if *raw_data* is not valid JSON.
        KeyError: if the ``data`` / ``products`` keys are missing.
    """
    page_data = json.loads(raw_data)
    return page_data["data"]["products"]


def get_search_token(procode: str) -> str:
    """Fetch the anti-CSRF search token required by the search POST.

    Loads the search page for *procode* and reads the hidden
    ``_searchToken`` input that must accompany the subsequent POST
    in ``get_search_html``.

    Args:
        procode: product code / search term.

    Returns:
        The token string.

    Raises:
        requests.HTTPError: if the page request fails (non-2xx status).
        ValueError: if the page contains no ``_searchToken`` input
            (previously this crashed with an opaque TypeError from
            subscripting None).
    """
    url = f"https://www.selleck.cn/search.html?searchDTO.searchParam={procode}&sp={procode}"
    # Explicit timeout: requests has no default and would otherwise hang forever.
    response = session.get(url, headers=heardes, timeout=30)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")
    token_input = soup.find("input", {"name": "_searchToken"})
    if token_input is None:
        raise ValueError(f"_searchToken not found on search page for {procode!r}")
    token = token_input["value"]
    print(f"search_token: {token}")
    return token


def get_search_html(procode: str, search_token: str) -> str:
    """POST the search form and return the result page HTML.

    Args:
        procode: product code / search term.
        search_token: the CSRF token obtained from ``get_search_token``.

    Returns:
        Raw HTML of the search-result page.
    """
    url = "https://www.selleck.cn/product/search.jhtml"
    data = {
        "searchDTO.searchParam": procode,
        "struts.token.name": "_searchToken",
        "_searchToken": search_token,
    }
    # Build a per-request copy instead of mutating the module-level
    # `heardes` dict — the original assignment leaked this referer into
    # every later request made by unrelated functions.
    request_headers = {
        **heardes,
        "referer": f"https://www.selleck.cn/search.html?searchDTO.searchParam={procode}&sp={procode}",
    }

    # Explicit timeout: requests has no default and would otherwise hang forever.
    response = session.post(url, headers=request_headers, data=data, timeout=30)
    print(response.status_code)
    return response.text


def get_prodes(url: str, timeout: float = 30) -> BeautifulSoup:
    """Fetch a product-detail page and return it parsed.

    Args:
        url: absolute product-detail URL (as built by ``extract_prodes_url``).
        timeout: seconds before the request is aborted; requests has no
            default timeout, so the original call could hang indefinitely.

    Returns:
        The parsed page as a BeautifulSoup document.
    """
    response = requests.get(url, headers=heardes, timeout=timeout)
    return BeautifulSoup(response.text, "html.parser")


def extract_proimg_url(soup: BeautifulSoup) -> str | None:
    """Return the absolute URL of the product image, or None if absent.

    Looks inside the left-hand container (``div.w260.fl``) for an ``<img>``
    and prefixes its relative ``src`` with ``base_url``.

    Note: the checks use ``is None`` deliberately — a bs4 Tag with no
    children evaluates falsy, so the original ``if img_tag:`` silently
    discarded a found (childless, void) ``<img>`` element.
    """
    container = soup.find("div", {"class": "w260 fl"})
    if container is None:
        return None
    img_tag = container.find("img")
    if img_tag is None:
        return None
    return base_url + img_tag["src"]


def extract_prodes(soup: BeautifulSoup) -> BeautifulSoup | None:
    """Return the section of the detail page holding the product description.

    Scans every ``<h3>`` heading and returns the parent element of the first
    one titled "生物活性" (bioactivity) or "产品描述" (product description);
    returns None when neither heading exists on the page.
    """
    wanted_titles = ("生物活性", "产品描述")
    for heading in soup.find_all("h3"):
        if heading.text in wanted_titles:
            return heading.parent
    return None


def extract_prodes_url(html: str) -> str | None:
    """Extract the first product-detail URL from a search-result page.

    Args:
        html: raw HTML of the search-result page (from ``get_search_html``).

    Returns:
        Absolute URL of the first product row's link, or None when the
        result table or its link is missing.
    """
    soup = BeautifulSoup(html, "html.parser")
    rows = soup.find_all("tr", {"name": "productList"})
    if not rows:
        return None
    # `is None` rather than truthiness: a bs4 Tag with no children is
    # falsy, so `if a_tag:` could wrongly discard a found link.
    a_tag = rows[0].find("a", {"class": "blue bold f15"})
    if a_tag is None:
        return None
    return base_url + a_tag["href"]


def gen_prodes(soup: BeautifulSoup) -> str:
    """Wrap the extracted description and the shared stylesheet in a <div>.

    Reads ``style.html`` (next to this module) and returns the serialized
    ``<div class='cxsw_description'>`` containing the style block followed
    by *soup*.

    Args:
        soup: the description section returned by ``extract_prodes``.

    Returns:
        The resulting HTML fragment as a string.
    """
    template = BeautifulSoup("<div class='cxsw_description'></div>", "html.parser")
    # Close the style file deterministically; the original open(...).read()
    # leaked the file handle.
    with open(style_file, "r", encoding="utf-8") as style_fh:
        style_soup = BeautifulSoup(style_fh.read(), "html.parser")

    desc = template.find("div", class_="cxsw_description")
    desc.append(style_soup)
    desc.append(soup)
    return str(desc)


def process_prodes(procode: str) -> str | None:
    """End-to-end lookup: search *procode* and build its description HTML.

    Flow: fetch CSRF token -> POST the search -> follow the first product
    link -> extract the description section -> wrap it with the stylesheet.

    Args:
        procode: product code / search term.

    Returns:
        The generated HTML fragment, or None when the search yields no
        product link or the detail page has no recognizable description.
    """
    search_token = get_search_token(procode)
    search_html = get_search_html(procode, search_token)
    prodes_url = extract_prodes_url(search_html)
    if prodes_url is None:
        return None
    time.sleep(1)  # be polite between the search POST and the detail GET
    prodes_soup = get_prodes(prodes_url)
    res_soup = extract_prodes(prodes_soup)
    if res_soup is None:
        # Previously gen_prodes(None) was called unconditionally and
        # crashed on desc.append(None) when no description section existed.
        return None
    # img_url = extract_proimg_url(prodes_soup)
    return gen_prodes(res_soup)
