# Scraper module: fetches herb pages with requests and parses them with BeautifulSoup.


import requests
import time
import csv
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import random

# Basic configuration
BASE_URL = "https://zhongyibaike.com/"
HERB_LIST_URL = "https://zhongyibaike.com/zhongyao/"  # herb index/list page (assumed URL — verify against the live site)
# Browser-like headers so the request is less likely to be rejected as a bot.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Referer": BASE_URL
}
# Throttle crawl rate (random 1-3 second delay between requests to avoid anti-bot triggers)
DELAY_RANGE = (1, 3)


def fetch_page(url):
    """Fetch a page and return its HTML text, or None on any request failure.

    Uses the module-level HEADERS and a 10-second timeout; non-2xx status
    codes are treated as failures rather than returned to the caller.
    """
    try:
        resp = requests.get(url, headers=HEADERS, timeout=10)
    except requests.exceptions.RequestException as e:
        print(f"请求失败 {url}：{e}")
        return None
    try:
        # Turn HTTP error status codes (4xx/5xx) into exceptions.
        resp.raise_for_status()
    except requests.exceptions.RequestException as e:
        print(f"请求失败 {url}：{e}")
        return None
    return resp.text


def parse_herb_detail_links(html):
    pass  # placeholder removed — see real definition below


def parse_herb_list(html):
    """Parse a herb list page and return detail-page URLs.

    Selects anchors under elements with class "herb-item" (selector is a
    guess — adjust to the real page markup), resolves relative hrefs
    against BASE_URL, and de-duplicates while preserving first-seen order
    so the same detail page is not crawled twice.

    Args:
        html: The list page's HTML text.

    Returns:
        list[str]: absolute, de-duplicated detail-page URLs.
    """
    soup = BeautifulSoup(html, "html.parser")
    # dict.fromkeys keeps insertion order and drops duplicate URLs,
    # which would otherwise cause redundant requests in crawl_herbs().
    seen = {}
    for item in soup.select(".herb-item a"):
        href = item.get("href")
        if href:
            # Resolve relative paths into a full URL.
            seen[urljoin(BASE_URL, href)] = None
    return list(seen)


def parse_herb_detail(html):
    """Parse a herb detail page into a dict of extracted fields.

    Missing tags fall back to placeholder strings; any unexpected parsing
    error is logged and a (possibly partial) dict is still returned.
    """
    soup = BeautifulSoup(html, "html.parser")
    herb_info = {}

    def pick(tag, fallback):
        # Stripped text of the tag, or the fallback when the tag is absent.
        return tag.get_text(strip=True) if tag else fallback

    try:
        # Herb name is assumed to live in the page's first <h1>.
        herb_info["name"] = pick(soup.h1, "未知名称")
        # Efficacy: assumed to be in a div with class "gongxiao".
        herb_info["功效"] = pick(soup.select_one(".gongxiao"), "无数据")
        # Nature/flavor/meridian: adjust selector to the real markup.
        herb_info["性味归经"] = pick(soup.select_one(".xingwei"), "无数据")
        # Further fields (dosage, contraindications, ...) follow the same pattern.
    except Exception as e:
        print(f"解析详情页失败：{e}")
    return herb_info


def crawl_herbs(max_pages=3):
    """Crawl the herb encyclopedia and save the results to herbs.csv.

    Walks up to *max_pages* list pages (pagination via ?page=N), fetches
    every detail page found, and writes all extracted records to
    "herbs.csv" (UTF-8 with BOM so Excel opens it correctly).

    Args:
        max_pages: Maximum number of list pages to crawl (limits load on
            the target server).
    """
    all_herbs = []
    current_page = 1
    while current_page <= max_pages:
        # Pagination (assumes a "page" query parameter, e.g. ?page=1).
        page_url = f"{HERB_LIST_URL}?page={current_page}"
        print(f"爬取列表页：{page_url}")
        html = fetch_page(page_url)
        if not html:
            break

        # Extract detail-page links from the list page.
        herb_links = parse_herb_list(html)
        if not herb_links:
            print("未找到中药链接，可能已到最后一页")
            break

        # Crawl each detail page.
        for link in herb_links:
            print(f"爬取详情页：{link}")
            detail_html = fetch_page(link)
            if detail_html:
                all_herbs.append(parse_herb_detail(detail_html))
            # Random delay to mimic human browsing.
            time.sleep(random.uniform(*DELAY_RANGE))

        current_page += 1
        # Slightly longer pause between list pages.
        time.sleep(random.uniform(2, 4))

    # Save the data to CSV.
    with open("herbs.csv", "w", encoding="utf-8-sig", newline="") as f:
        if all_herbs:
            # BUGFIX: using only all_herbs[0].keys() made DictWriter raise
            # ValueError whenever a later record had a key the first record
            # lacked (or was missing one). Take the union of keys across all
            # records, preserving first-seen order, and blank-fill gaps.
            fieldnames = {}
            for herb in all_herbs:
                fieldnames.update(dict.fromkeys(herb))
            writer = csv.DictWriter(f, fieldnames=list(fieldnames), restval="")
            writer.writeheader()
            writer.writerows(all_herbs)
    print(f"爬取完成，共{len(all_herbs)}条数据，已保存到herbs.csv")


if __name__ == "__main__":
    # Crawling formulas (方剂) works the same way — just change the list-page
    # URL and the detail-page parsing rules.
    crawl_herbs(max_pages=3)  # Cap the page count to avoid stressing the server



