import requests
from lxml import etree
import re
from concurrent.futures import ThreadPoolExecutor
from typing import List, Dict
import time

# Site root; category paths returned by get_category_list() are relative
# and are joined onto this in request_page().
base_url = "https://www.medchemexpress.cn"
# Browser-like request headers — presumably to look like a normal desktop
# Chrome client rather than a script (standard scraping practice; confirm
# whether the site actually requires them).
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
}


def get_category_list():
    """Fetch the category links from the recombinant-proteins landing page.

    Returns:
        list[str]: relative href paths, one per category entry.

    Raises:
        requests.HTTPError: if the landing page returns an error status.
    """
    url = "https://www.medchemexpress.cn/recombinant-proteins.html"
    # Timeout so a stalled server cannot hang the scraper indefinitely;
    # fail fast on HTTP errors instead of parsing an error page.
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()

    html = etree.HTML(response.text)
    # Each category is an <li> under the category box; its link is in ./div/a.
    category_dom = html.xpath("//div[@class='category-box']/ul/li")
    category_list = []
    for category in category_dom:
        hrefs = category.xpath("./div/a/@href")
        # Skip malformed entries rather than crashing on [0] of an empty list.
        if hrefs:
            category_list.append(hrefs[0])
    return category_list


def request_page(category_url: str, page: int = 1, timeout: float = 30):
    """Request one page of a category's product listing.

    Args:
        category_url (str): relative category path, joined onto ``base_url``.
        page (int, optional): 1-based page number. Defaults to 1.
        timeout (float, optional): request timeout in seconds. Defaults to 30.

    Returns:
        requests.Response: the raw HTTP response for the listing page.
    """
    url = f"{base_url}{category_url}"
    print(url)  # progress trace for long crawls
    params = {"page": str(page)}
    # Without a timeout a stalled connection would block a worker thread forever.
    response = requests.get(url, headers=headers, params=params, timeout=timeout)
    return response


def get_page_code(response):
    """Extract product codes and detail-page URLs from one listing page.

    Args:
        response: requests.Response whose body is a category listing page.

    Returns:
        list[dict]: items of the form ``{"pro_code": str, "pro_url": str}``.
    """
    _code_list = []
    html = etree.HTML(response.text)
    code_dom = html.xpath("//ul[@class='sub_ctg_list_con']/li")
    for code in code_dom:
        code_texts = code.xpath("./dl/dt[@class='t_pro_list_cat']/text()")
        url_hrefs = code.xpath("./dl/dd//a[@class='list-history-color']/@href")
        # Skip malformed entries instead of raising IndexError on [0],
        # which would previously abort the whole page.
        if not code_texts or not url_hrefs:
            continue
        _code_list.append({"pro_code": code_texts[0], "pro_url": url_hrefs[0]})
    return _code_list


def process_page(category_url: str, page: int) -> List[Dict]:
    """Fetch a single listing page and parse its products.

    Args:
        category_url (str): relative category path.
        page (int): 1-based page number.

    Returns:
        List[Dict]: the products found on that page.
    """
    # Thin wrapper combining the fetch and parse steps for thread-pool use.
    return get_page_code(request_page(category_url, page))


def parse_total_pages(page_html: str) -> int:
    """Parse the total page count out of the listing page's getPages(...) call.

    Args:
        page_html (str): raw HTML of a listing page.

    Returns:
        int: total number of pages; 1 when the pagination marker is absent.
    """
    match = re.search(r"getPages\([^,]+, \d+, (\d+), \d+\);", page_html)
    return int(match.group(1)) if match else 1


def get_product_list(category_url: str, max_workers: int = 2) -> List[Dict]:
    """Collect products from every page of a category (threaded).

    Args:
        category_url (str): relative category path.
        max_workers (int, optional): thread-pool size. Defaults to 2.

    Returns:
        List[Dict]: products from all pages, kept in page order.
    """
    # The first page embeds the pagination info in a getPages(...) script call.
    response = request_page(category_url)
    total_pages = parse_total_pages(response.text)

    _code_list: List[Dict] = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [
            executor.submit(process_page, category_url, page)
            for page in range(1, total_pages + 1)
        ]
        # Iterate in submission order so results stay sorted by page number.
        for future in futures:
            try:
                _code_list.extend(future.result())
                # NOTE(review): this sleep only paces result collection — all
                # page requests were already submitted to the pool above. Real
                # request throttling would have to live inside process_page.
                time.sleep(1)
            except Exception as e:
                # Keep crawling the remaining pages even if one page fails.
                print(f"处理页面时发生错误: {str(e)}")

    return _code_list


if __name__ == "__main__":
    # for category in list:
    #     page = get_product_list(category)
    #     print(page)
    test_url = "/proteins/cytokines-and-growth-factors.html"
    code_data = get_product_list(test_url)
    # save_json(code_data, "cytokines.json")
