import requests
import json
import time
import random

# Configuration: target POST endpoint plus the Referer/Origin the site expects.
BASE_URL = 'https://code.nhsa.gov.cn/hc/stdSpecification/getStdSpecificationListDataCompanyReport.html'
REFERER = 'https://code.nhsa.gov.cn/hc/stdSpecification/toStdSpecificationCompanyReportList.html'
ORIGIN = 'https://code.nhsa.gov.cn'

# Pool of User-Agent strings to choose from.
user_agent_pool = [
    # Chrome
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    # Edge
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.59",
]

# Request headers mimicking the site's own AJAX call.
# NOTE(review): random.choice runs once at import time, so the SAME User-Agent
# is reused for every request in this run (not re-rolled per request).
HEADERS = {
    'User-Agent': random.choice(user_agent_pool),
    'Referer': REFERER,
    'Origin': ORIGIN,
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'X-Requested-With': 'XMLHttpRequest',
    'Accept': 'application/json, text/javascript, */*; q=0.01',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
}

# Initial form parameters for the paginated grid endpoint; 'page' is mutated
# by main() on each iteration.
PARAMS = {
    '_search': 'false',
    'nd': str(int(time.time() * 1000)),  # timestamp in ms, fixed at import time
    'sord': 'asc',
    'page': 1,
    'rows': 100
}

# Keys used to extract fields from the JSON response body.
# NOTE(review): "REPONSE" is a typo for "RESPONSE"; renaming it would require
# touching every reference, so the name is kept as-is here.
REPONSE = {
    'total_key': 'total',
    'rows_key': 'rows',
    'msg_key' : 'msg'
}

# To avoid triggering anti-scraping measures, wait 3.4 s between requests —
# empirically the rate must stay below ~0.33 req/s (≤ 3 requests per 10 s).
sleep_seconds = 3.4

# Accumulator for all rows fetched across every page.
all_items = []

def fetch_data(session, params, timeout=30):
    """POST ``params`` to BASE_URL and return the parsed JSON payload.

    Args:
        session: requests.Session carrying cookies across calls.
        params: form fields sent as the urlencoded POST body.
        timeout: seconds to wait for connect/read before giving up.
            New keyword with a default, so existing callers are unaffected.

    Returns:
        The decoded JSON object on success, or None on any request or
        JSON-decoding failure (callers must check for None).
    """
    try:
        # Without an explicit timeout, requests can block forever on a
        # stalled server; 30 s bounds each page fetch.
        response = session.post(BASE_URL, data=params, headers=HEADERS,
                                timeout=timeout)
        response.raise_for_status()  # raise on HTTP 4xx/5xx
        return response.json()
    except (requests.exceptions.RequestException, ValueError) as e:
        # ValueError covers a non-JSON body from response.json(), which the
        # original except clause let escape and crash the caller.
        print(f"请求失败: {e}")
        return None

def main():
    """Fetch every page of report rows and dump them to cons_items.json."""
    # A Session object manages cookies across requests automatically.
    session = requests.Session()

    page = 1
    total_pages = None

    while True:
        print(f"正在获取第 {page} 页数据...")

        # Advance the pagination parameter for this iteration.
        PARAMS['page'] = page

        # Fetch the current page.
        data = fetch_data(session, PARAMS)

        # Bug fix: fetch_data returns None on request failure; the original
        # code then crashed with AttributeError on data.get(...). Stop
        # gracefully and save whatever was collected so far.
        if data is None:
            print(f"第 {page} 页请求失败，提前结束。")
            break

        # Total number of pages reported by the server.
        total_pages = data.get(REPONSE['total_key'])
        if not total_pages:
            # Covers both a missing key and a falsy value (0 / '' / None).
            print(f"总页数{total_pages},msg:{data.get(REPONSE['msg_key'])}")
            break

        # Rows on the current page.
        items = data.get(REPONSE['rows_key'], [])

        if not items:
            print("没有更多数据，退出循环。")
            break

        # Append this page's rows to the module-level accumulator.
        all_items.extend(items)

        # Last page reached — done.
        if page >= total_pages:
            print(f"已获取所有 {total_pages} 页数据。")
            break

        # Move to the next page after the anti-scraping delay.
        page += 1
        time.sleep(sleep_seconds)

    # Persist everything collected (possibly partial on early failure).
    with open('cons_items.json', 'w', encoding='utf-8') as f:
        json.dump(all_items, f, ensure_ascii=False, indent=4)

    print(f"共获取 {len(all_items)} 条数据，已保存到 cons_items.json 文件中。")

# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()