# _*_ coding : utf-8 _*_
# @Time : 2022/1/11 19:12
# @Author : 01.requests.py
# @Project : 小猿圈爬虫--药监总局数据页面爬取
import json
import requests
if __name__ == '__main__':
    # Step 1: list endpoint of the drug-administration licensing site
    # (the page http://125.35.6.84:81/xk/ loads this data via AJAX POST).
    url = 'http://125.35.6.84:81/xk/itownet/portalAction.do?method=getXKzsList'
    # UA spoofing: send a browser User-Agent so the server accepts the request.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
    }

    # BUG FIX: the accumulators must be created ONCE, before the page loop.
    # The original re-created them on every iteration, so only the last
    # page's IDs (and no detail data) survived the loop.
    id_list = []         # company IDs collected from every page
    all_data_list = []   # detail records, persisted at the end

    for page in range(1, 6):
        # POST form parameters for one page of the company list.
        data = {
            'on': 'true',
            'page': str(page),
            'pageSize': '15',        # number of entries returned per page
            'productName': '24',
            'conditionType': '1',
            'applyName': '100:90',
        }
        # Step 2: issue the request.
        response = requests.post(url=url, data=data, headers=headers)
        # Step 3: parse the response body. json() is only safe because this
        # endpoint is known to return JSON.
        json_ids = response.json()
        for item in json_ids['list']:    # renamed from `dict` (shadowed builtin)
            id_list.append(item['ID'])

    # Fetch the detail record for every collected company ID.
    post_url = 'http://125.35.6.84:81/xk/itownet/portalAction.do?method=getXKzsById'
    for company_id in id_list:           # renamed from `id` (shadowed builtin)
        data = {
            'id': company_id
        }
        # BUG FIX: the detail request must go to `post_url`; the original
        # posted back to the list endpoint `url`, so it never fetched details.
        detail_json = requests.post(url=post_url, data=data, headers=headers).json()
        all_data_list.append(detail_json)

    # Step 4: persist to disk; `with` guarantees the file handle is closed
    # even if json.dump raises (the original never closed it).
    with open('./allDate.json', 'w', encoding='utf-8') as fp:
        json.dump(all_data_list, fp, ensure_ascii=False)

    print('爬取结束！！！')