# -*- coding: utf-8 -*-

"""
Description: nest 官网搜索工具
"""
import time
import requests
from bs4 import BeautifulSoup
from utils.req import post_request, get_request
base_url = "https://www.cell-nest.com"
image_url = "https://cell-nest.bcdn8.com/"


def del_prolist_attrs(data_lsit: list) -> list:
    """Strip unused attributes from a seller product list, in place.

    Only the image link, catalogue number and product name are kept;
    every other known key is removed from each record.

    Args:
        data_lsit (list): list of product dicts from the shop listing API.

    Returns:
        list: the same list object, with each dict pruned in place.
    """
    # Keys that carry no value for downstream consumers.
    unwanted_keys = frozenset({
        "ProductId",
        "ProductName",
        "ProductSpec",
        "ProductPrice",
        "Packaging",
        "GoodsImg1",
        "Details",
        "OrderBy",
        "AddTime",
        "IsOnline",
        "IsDelete",
        "PriceRules",
        "AttrIds",
        "ProductAttr",
        "SearchAttr",
        "BuyPrice",
        "car_num",
        "cart_checked",
        "GoodsName",
        "IsActivity",
        "Id",
    })

    for record in data_lsit:
        # pop(..., None) tolerates records that lack some of the keys
        for unwanted in unwanted_keys:
            record.pop(unwanted, None)
    return data_lsit


def parse_search(html_content) -> dict:
    """Parse a cell-nest search results page into a result dict.

    Args:
        html_content (str): raw HTML of the search results page.

    Returns:
        dict: with keys
            - page (int): current page number (1 if no pagination found)
            - results (list): dicts with ``href`` and ``text`` per hit
            - total (int): total page count (0 when there are no results)
    """
    search_res = {
        "page": 1,
        "results": [],
        "total": 0,
    }

    soup = BeautifulSoup(html_content, "html.parser")
    res_dom = soup.find("ul", {"class": "search_ul"})
    if res_dom is None:
        # No results container at all.
        return search_res

    # Collect every anchor that has both a link and visible text.
    results = []
    for link in res_dom.find_all("a"):
        href = link.get("href", "")
        text = link.get_text(strip=True)
        if href and text:
            results.append({"href": href, "text": text})
    search_res["results"] = results

    # Pagination bar. It is absent on single-page results, and the
    # original code crashed here with AttributeError in that case.
    pagebar = soup.find("div", {"id": "page"})
    if pagebar is None:
        search_res["page"] = 1
        search_res["total"] = 1
        return search_res

    # Current page number; default to "1" if the marker li is missing.
    current_li = pagebar.find("li", {"class": "thisclass"})
    current_page = current_li.text if current_li is not None else "1"

    # Total pages come from the last numbered pager link; with no such
    # links the current page is also the last one.
    pager_indexes = pagebar.find_all("li", {"class": "pager_index"})
    if pager_indexes:
        total_pages = pager_indexes[-1].text
    else:
        total_pages = current_page

    search_res["page"] = int(current_page)
    search_res["total"] = int(total_pages)
    return search_res


def search_product(name: str, page: int = 1):
    """Search the cell-nest site for products by name.

    Args:
        name (str): product name to search for.
        page (int, optional): result page number. Defaults to 1.

    Returns:
        dict: with the following keys:
            - page (int): current page number
            - results (list): hits, each a dict with ``href`` and ``text``
            - total (int): total page count
    """
    headers = {
        "referer": "https://www.cell-nest.com/search",
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36",
    }
    url = "https://www.cell-nest.com/search"
    params = {"q": name, "typeName": "产品", "page": page}
    # requests has no default timeout; without one a stalled server
    # would hang this call forever.
    response = requests.get(url, headers=headers, params=params, timeout=15)
    return parse_search(response.text)


def get_category_list(category_url: str) -> list[dict]:
    """Fetch all product records for a category, across every page.

    Args:
        category_url (str): category detail URL; the trailing path
            segment is used as the product/category id.

    Returns:
        list[dict]: product records with unused attributes stripped
            (see ``del_prolist_attrs``).
    """
    category_id = category_url.split("/")[-1]
    headers = {
        "accept": "application/json, text/javascript, */*; q=0.01",
        "referer": f"https://www.cell-nest.com/product/detail/{category_id}",
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36",
        "x-requested-with": "XMLHttpRequest",
    }
    url = "https://www.cell-nest.com/handle/getproductno"
    page_size = 6
    data = {"ProductId": category_id, "pageSize": str(page_size), "pageIndex": "1"}

    # First page also tells us the total item count.
    response = post_request(url=url, headers=headers, data=data, verify=False)
    res_data = response.json()["data"]
    total_items = res_data["totalNum"]

    # Ceiling division to get the page count.
    total_pages = (total_items + page_size - 1) // page_size

    # Keep the first page's records instead of refetching page 1 below
    # (the original discarded them and requested page 1 twice).
    _category_list = list(res_data["list"])

    # Fetch the remaining pages, if any.
    for page in range(2, total_pages + 1):
        data["pageIndex"] = str(page)
        print(f"正在获取 {page} 页")
        response = post_request(url=url, headers=headers, data=data, verify=False)
        _category_list.extend(response.json()["data"]["list"])
        # Be polite to the server between page requests.
        time.sleep(1)

    # Strip unused attributes on every path; the original returned the
    # raw first page unpruned when there was only one page.
    return del_prolist_attrs(_category_list)


def get_first_category():
    """Return the hard-coded id of the first category.

    NOTE(review): this looks like a placeholder/stub — confirm whether
    it should query the site instead of returning a constant.
    """
    return "123"