# -*- coding: utf-8 -*-
#!/usr/bin/env python3
# @Time    : 2021/3/15 20:28
# @Author  : buke-freedom
# @File    : 药监总局化妆品公司许可证.py
"""
# @description：

"""
import requests
import json

if __name__ == "__main__":
    # Batch-fetch company IDs from the NMPA (China National Medical Products
    # Administration) cosmetics-license listing endpoint.
    url = "http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsList"
    data = {
        "on": "true",
        "page": "1",          # first page only; bump to paginate further
        "pageSize": "15",
        "productName": "",
        "conditionType": "1",
        "applyname": "",
        "applysn": "",
    }
    # UA spoofing: send a browser User-Agent so the server accepts the request.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:86.0) Gecko/20100101 Firefox/86.0"
    }
    all_data_list = []  # detail records for every company

    # The listing response is JSON; each entry's "ID" keys the detail endpoint.
    # timeout added so a stalled connection cannot hang the script forever.
    json_ids = requests.post(url=url, data=data, headers=headers, timeout=10).json()
    id_list = [dic["ID"] for dic in json_ids["list"]]
    # print(id_list)

    # Fetch the full detail record for each company id.
    post_url = "http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsById"
    # `company_id` instead of `id` — avoids shadowing the builtin.
    for company_id in id_list:
        detail_json = requests.post(
            url=post_url, data={"id": company_id}, headers=headers, timeout=10
        ).json()
        all_data_list.append(detail_json)

    # Spot-check the scraped data.
    for person in all_data_list:
        print(person["businessPerson"])

    # Persist all detail records; `with` guarantees the handle is closed and
    # flushed (the original opened the file and never closed it).
    with open("./alldata.json", "w", encoding="utf-8") as fp:
        json.dump(all_data_list, fp, ensure_ascii=False)

    print("抓取结束!!!")
