import requests
import json
import pandas as pd
import time
import random
from concurrent.futures import ThreadPoolExecutor

# Maximum consecutive failed attempts for a single page before taking the
# long 320-second cool-down pause inside crawl_data().
max_retries = 5
# Shared accumulator filled by the worker threads: one inner list per licence
# record; converted to a DataFrame and dumped to Excel in the __main__ block.
# (CPython's list.append is atomic, so concurrent appends are safe here.)
contentlist = []

def crawl_data(q):
    """Fetch one page (10 records) of licence data for page index *q* and
    append the parsed rows to the module-level ``contentlist``.

    Retries indefinitely until the API reports success.  Each failure
    (network error, bad JSON, or ``success == False``) is followed by a
    short random pause; after ``max_retries`` consecutive failures the
    function rests 320 seconds, resets the counter, and keeps trying.

    Parameters
    ----------
    q : int
        Zero-based page index; translated to ``start = q * 10``.
    """
    # Request parts are loop-invariant — build them once, outside the retry loop.
    url = 'https://xkz.cbirc.gov.cn/bx/OPtdJL/getLicence.do?'
    data = {'useState': 3,
            'start': q * 10,
            'limit': 10
            }
    # NOTE: the original code hard-coded 'Content-Length': '16', which is
    # wrong for this payload; requests computes the correct value itself,
    # so the header is omitted here.
    headers = {
        'Connection': 'keep-alive',
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Host': 'xkz.cbirc.gov.cn',
        'X-Requested-With': 'XMLHttpRequest',
        'Origin': 'https://xkz.cbirc.gov.cn',
        'Referer': 'https://xkz.cbirc.gov.cn/jr/',
        'Sec-Ch-Ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
        'Sec-Ch-Ua-Mobile': '?0',
        'Sec-Ch-Ua-Platform': '"Windows"',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
        # NOTE(review): this session cookie is a captured, likely stale value —
        # confirm the endpoint still accepts it before a long crawl.
        'Cookie': 'isClick=true; yfx_c_g_u_id_10006849=_ck22102109245218135713153187778; yfx_mr_10006849=%3A%3Amarket_type_free_search%3A%3A%3A%3Abaid'
                  'u%3A%3A%3A%3A%3A%3A%3A%3Awww.baidu.com%3A%3A%3A%3Apmf_from_free_search; yfx_mr_f_10006849=%3A%3Amarket_type_free_search%3A%3A%3A%3Abaidu'
                  '%3A%3A%3A%3A%3A%3A%3A%3Awww.baidu.com%3A%3A%3A%3Apmf_from_free_search; yfx_key_10006849=; yfx_f_l_v_t_10006849=f_t_1632801034370__r_t_1684897'
                  '627362__v_t_1684897627362__r_c_5; JSESSIONID=0000-utXDNuzSbob1hLgncqXC5a:-1'
    }

    retries = 0
    while True:
        try:
            response = requests.post(url=url, data=data, headers=headers, timeout=10)
            # .json() raises ValueError on a non-JSON body, caught below.
            result = response.json()
            print(result)
            print(q, result['success'])

            if result['success']:
                for record in result['datas']:
                    contentlist.append([
                        q,
                        record['id'],
                        record['flowNo'],
                        record['certCode'],
                        record['fullName'],
                        record['setDate'],
                        record['date'],      # exported as 'printDate'
                        record['useState'],
                    ])
                # Polite pause before releasing the worker for the next page.
                time.sleep(random.uniform(7, 10))
                return
        except (requests.RequestException, ValueError, KeyError):
            # Network trouble, malformed JSON, or an unexpected payload shape:
            # treat all three as a retryable failure.
            pass

        # Failure path (exception above, or the API answered success == False).
        retries += 1
        time.sleep(random.uniform(7, 10))
        if retries >= max_retries:
            print('请求多次未果，休息320秒并继续')
            time.sleep(320)
            # Reset so the long cool-down recurs every max_retries failures
            # (the original '== max_retries' check fired only once, after
            # which the loop retried forever with no long back-off).
            retries = 0

if __name__ == '__main__':
    # Fan pages 3001-3999 out across 10 worker threads; each worker appends
    # its rows to the shared module-level ``contentlist``.
    with ThreadPoolExecutor(max_workers=10) as executor:
        # executor.map is lazy: the iterator must be consumed, otherwise any
        # exception raised inside a worker is silently dropped (the original
        # bound it to an unused variable and never iterated it).
        for _ in executor.map(crawl_data, range(3001, 4000)):
            pass

    dataframe = pd.DataFrame(
        contentlist,
        columns=['page', 'id', 'flowNo', 'certCode', 'fullName',
                 'setDate', 'printDate', 'useState'],
    )
    # index=False: omit the meaningless positional-index column from the sheet.
    dataframe.to_excel(r"失控3001-4000.xlsx", index=False)
    print('爬取成功')
