import random
import requests
import json
import csv
import time
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# Page range to scrape: the loop below fetches pages [begin, end),
# i.e. exactly (end - begin) pages.
begin = 908
end = begin + 1

# Rough run-time estimate. Bug fix: range(begin, end) yields (end - begin)
# pages, not (end - begin + 1), so the estimate was one page too high.
# (The 8*3/60 factor is the original author's per-page cost model — TODO
# confirm it still matches the actual sleep/request timings.)
print(f"开始爬取，大约需要{(end - begin)*8*3 / 60 : .2f}分钟")

# Endpoint of the TCMIP "browse" API used for the paged POST queries below.
url = "http://www.tcmip.cn:18124/home/browse/"

# Browser-like request headers so the server treats us as a normal client.
headers = dict(
    [
        ("Accept", "application/json, text/plain, */*"),
        ("Accept-Encoding", "gzip, deflate"),
        ("Accept-Language", "zh-CN,zh;q=0.9"),
        ("Connection", "keep-alive"),
        ("Content-Type", "application/json;charset=UTF-8"),
        ("DNT", "1"),
        ("Host", "www.tcmip.cn:18124"),
        ("Origin", "http://www.tcmip.cn"),
        ("Referer", "http://www.tcmip.cn/"),
        (
            "User-Agent",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
            "(KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
        ),
    ]
)

# Retry policy: up to 5 attempts with exponential backoff (1 s, 2 s, 4 s, ...)
# on transient 5xx responses, for the listed HTTP methods (including POST,
# which the scraper uses).
_retry_policy = Retry(
    total=5,
    backoff_factor=1,
    status_forcelist=[500, 502, 503, 504],
    allowed_methods=["HEAD", "GET", "OPTIONS", "POST"],
)

# One shared session with the retrying adapter mounted for both schemes.
session = requests.Session()
_adapter = HTTPAdapter(max_retries=_retry_policy)
for _scheme in ("http://", "https://"):
    session.mount(_scheme, _adapter)

# Append scraped rows to the CSV. The file is opened in append mode so
# interrupted runs can be resumed; write the header row only when the file
# is still empty, otherwise every re-run would insert a duplicate header
# line in the middle of the data (bug in the original).
with open(
    "chinese_medicine_formulas_all.csv", mode="a", newline="", encoding="utf-8"
) as file:
    writer = csv.DictWriter(
        file, fieldnames=["古方名称", "剂型", "处方组成", "处方来源（中医古籍名）"]
    )
    if file.tell() == 0:  # fresh/empty file: emit the column names once
        writer.writeheader()

    # Fetch each page in [begin, end); the API returns 20 records per page.
    for page_no in range(begin, end):
        payload = {
            "type": "traditional_chinese_medicine_formulas",
            "pageNo": page_no,
            "pageSize": 20,
            "language": "cn",
        }

        start_time = time.time()  # request start, for timing diagnostics

        try:
            # 10-second timeout so a hung connection cannot stall the run;
            # the session's adapter already retries transient 5xx failures.
            response = session.post(url, headers=headers, json=payload, timeout=10)
            elapsed_time = time.time() - start_time
            print(f"第 {page_no} 页请求耗时: {elapsed_time:.2f} 秒")

            if response.status_code == 200:
                data = response.json().get("data", [])
                if data:
                    # The rows are nested one level down; use .get so a
                    # schema change degrades to "no rows" instead of a
                    # KeyError crash (original indexed data[0]["data"]).
                    rows = data[0].get("data", [])
                    for item in rows:
                        writer.writerow(
                            {
                                "古方名称": item.get("古方名称", ""),
                                "剂型": item.get("剂型", ""),
                                "处方组成": ", ".join(item.get("处方组成", [])),
                                "处方来源（中医古籍名）": item.get(
                                    "处方来源（中医古籍名）", ""
                                ),
                            }
                        )
                    print(f"第 {page_no} 页的数据已写入 CSV 文件！")
            else:
                print(f"请求失败，状态码：{response.status_code}，页码：{page_no}")

        except requests.exceptions.RequestException as e:
            # Best-effort: log the failing page and keep going.
            print(f"请求出错，页码：{page_no}，错误信息：{e}")

        # Random 8-10 s pause between requests to avoid hammering the server
        # (the original comment claimed 10-20 s; the code uses uniform(8, 10)).
        sleep_time = random.uniform(8, 10)
        print(f"等待 {sleep_time:.2f} 秒后继续...")
        time.sleep(sleep_time)

print("所有数据请求完成！")
