import urllib.request
import re
from lxml import etree
import time
import random
import json


def clean_chinese(text):
    """Keep only CJK ideographs and a small set of Chinese punctuation.

    Any other character (Latin letters, digits, HTML whitespace, etc.) is
    dropped. Non-string input is stringified first, so None becomes "".
    """
    kept = re.findall(r'[\u4e00-\u9fa5，。！？【】（）%、]', str(text))
    return ''.join(kept).strip()


def clear_file():
    """Truncate both output files so every run starts from empty files.

    Data1.txt holds the human-readable log; medical.json accumulates one
    JSON object per line as pages are scraped.
    """
    for path in ('Data1.txt', 'medical.json'):
        # Opening in 'w' mode creates the file if missing and empties it.
        with open(path, 'w', encoding='utf-8'):
            pass


def get_html():
    """Scrape pages 1-10 of zhys.com's ingredient listings and persist them.

    For each page this appends a human-readable record to Data1.txt and one
    JSON object per line (JSON-Lines) to medical.json. Each page is retried
    up to max_retries times; after the final failure the page is skipped and
    a failure marker is logged to Data1.txt.
    """
    base_url = 'https://www.zhys.com/shicai/{}.html'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/91.0.4472.124 Safari/537.36',
        'Referer': 'https://www.zhys.com/'
    }

    max_retries = 3
    request_delay = 1  # base seconds between requests; jitter added below

    # Pages on the site are 1-indexed, so iterate 1..10 directly instead of
    # incrementing a 0-based counter inside the loop.
    for page in range(1, 11):
        url = base_url.format(page)
        retry_count = 0
        success = False

        print(f"\n正在处理数据{page}")

        while retry_count < max_retries and not success:
            try:
                # Randomized delay so requests don't hit the server in a
                # fixed rhythm.
                time.sleep(request_delay + random.uniform(0, 0.5))
                req = urllib.request.Request(url=url, headers=headers)

                with urllib.request.urlopen(req, timeout=15) as res:
                    if res.status != 200:
                        raise Exception(f"HTTP状态码异常: {res.status}")

                    html = res.read().decode('utf-8')
                    selector = etree.HTML(html)

                    # Each xpath may legitimately return an empty list when
                    # a page lacks that section, hence the fallbacks below.
                    h1 = selector.xpath('//h1[@class="detail-title"]/span[@class="d-t"]/text()')
                    h2 = selector.xpath('//span[contains(., "别名：")]/text()')
                    h3 = selector.xpath('//div[contains(@class, "gongxiao")]/a/text()')
                    h4 = selector.xpath('//li[contains(@class,"c3")]/p//text()')
                    h5 = selector.xpath('//li[contains(@class,"c4")]/p//text()')

                    # Clean each field; an empty xpath result falls back to a
                    # placeholder string (a non-empty list is always truthy,
                    # so no separate len() check is needed).
                    title1 = clean_chinese(h1[0].split('的简介')[0]) if h1 else "未知名称"
                    title2 = clean_chinese(h2[0].split('：')[-1]) if h2 else "无别名"
                    title3 = [clean_chinese(item) for item in h3] if h3 else []
                    title4 = clean_chinese("".join(h4)) if h4 else "无信息"
                    title5 = clean_chinese("".join(h5)) if h5 else "无信息"

                    print(f"名称: {title1}")
                    print(f"别名: {title2}")
                    print(f"功效: {'、'.join(title3)}")
                    print(f"适宜人群: {title4}")
                    print(f"禁忌人群: {title5}")

                    data_entry = {
                        "id": page,
                        "name": title1,
                        "alias": title2,
                        "effects": title3,
                        "suitable_group": title4,
                        "taboo_group": title5,
                        "source_url": url
                    }

                    # Append the human-readable record immediately so partial
                    # runs still leave usable output.
                    with open('Data1.txt', 'a', encoding='utf-8') as f:
                        f.write(
                            f"页码：{page}\n"
                            f"名称：{title1}\n"
                            f"别名：{title2}\n"
                            f"功效：{'、'.join(title3)}\n"
                            f"适宜人群：{title4}\n"
                            f"禁忌人群：{title5}\n\n"
                        )

                    # One JSON object per line (JSON-Lines). Use write(), not
                    # writelines(), on the serialized string; the with block
                    # closes the file, so no explicit close() is needed.
                    with open('medical.json', 'a', encoding='utf-8') as f:
                        f.write(json.dumps(data_entry, ensure_ascii=False))
                        f.write('\n')

                    success = True
                    print(f"数据 {page} 采集成功")

            except Exception as e:
                # Broad catch is deliberate: URLError, timeout, decode and
                # parse failures should all trigger a retry.
                retry_count += 1
                print(f"第 {page} 页第 {retry_count} 次尝试失败: {str(e)}")
                if retry_count == max_retries:
                    print(f"跳过第 {page} 页")
                    with open('Data1.txt', 'a', encoding='utf-8') as f:
                        f.write(f"页码：{page} 采集失败\n\n")
                    break  # give up on this page, move on to the next


if __name__ == '__main__':
    try:
        clear_file()
        start_time = time.time()
        get_html()
        elapsed = time.time() - start_time
        print(f"\n数据采集完成！总耗时: {elapsed:.2f}秒")

        # medical.json is JSON-Lines (one object appended per page), so a
        # single json.load() would fail with "Extra data". Parse per line,
        # skipping blanks, to count the successfully collected records.
        with open('medical.json', 'r', encoding='utf-8') as f:
            records = [json.loads(line) for line in f if line.strip()]
        print(f"成功采集 {len(records)} 条数据")

    except KeyboardInterrupt:
        print("\n用户中断程序执行")
    except Exception as e:
        print(f"程序运行出错: {str(e)}")