# -*- coding:utf-8 -*-
# @author: LY
# @time:2022/4/23 0023:9:23
import pdb
import re, warnings, time, os
import threading
import requests, random, json,hashlib
from datetime import datetime

os.chdir(os.path.dirname(__file__))
warnings.filterwarnings("ignore")

from lxml import etree
from urllib.parse import quote, unquote
from queue import Queue
from threading import Thread
from selenium import webdriver
from fake_useragent import UserAgent
from pymouse import *
from pykeyboard import PyKeyboard

from config_set.conf_data import *
from config_set.conn_pool import *
from config_set.operation_mysql import *
from config_set.address_dict import address_json

# Shared auth header and endpoint for the keyno-lookup middleware API.
# NOTE(review): hardcoded bearer token in source — consider moving to config/env.
header = {"Authorization": "Bearer kasjdflqjkeofawejifjfglewjemrgopwekrgopwerjkgopqwerjksgogkp"}
get_keyno = "http://midd.com/api/comp/get_comp_keyno"


class beian_obj():
    def __init__(self):
        """Set up the MySQL pool, work queues, and reset the new_data flag."""
        # NOTE(review): hardcoded production DB credentials — move to config/env.
        beian= {
            'host': '101.35.198.253',
            'port': 3306,
            'user': 'beian',
            'password': 'LeFsP8exKyYLJR2F',
            'database': 'beian',
            'charset': 'utf8'
        }
        # Retry forever until the DB pool is reachable (bare except swallows everything).
        while True:
            try:
                self.mysqlobj = get_connection(beian)  # MySQL pool object (original comment said "mongodb"; the config above is MySQL)
                break
            except:
                print("数据库重连")
                time.sleep(3)
        self.ua = UserAgent()                      # random User-Agent generator for requests
        self.interface_que = Queue(maxsize=1000)   # batches waiting to be POSTed to the middleware API
        self.mysql_que = Queue(maxsize=1000)       # scraped rows waiting to be written to MySQL
        self.thread_num = 20
        self.lock = threading.Lock()
        self.stop_list = []                        # one sentinel appended per finished scraper thread
        self.version = "20220423"
        # Clear the new_data flag so this run marks only freshly-seen rows.
        mysqlobj = self.mysqlobj.connection()
        curr = mysqlobj.cursor()
        update_sql = f"""update beian_data_new set new_data = 0 where new_data != 0"""
        conn_update(update_sql, curr, mysqlobj)
        curr.close()
        mysqlobj.close()

    # Post one record batch to the middleware intake API.
    def save_interface(self, temporary):
        """POST `temporary` (a dict of form fields) to the add_comp_address API.

        Retries forever on network errors and 5xx responses. Prints the
        response JSON, falling back to raw text when the body is not JSON.
        """
        # url_inface = "https://midd.com/api/comp_address/add_comp_address"
        url_inface = "https://midd.jianshequan.com/api/comp_address/add_comp_address"
        header = {"Authorization": "Bearer kasjdflqjkeofawejifjfglewjemrgopwekrgopwerjkgopqwerjksgogkp"}
        while True:
            try:
                response = requests.post(url=url_inface, headers=header, data=temporary, verify=False, allow_redirects=False)
                if response.status_code >= 500:
                    # Server-side error: back off and retry.
                    time.sleep(1)
                    continue
                break
            except Exception as e:
                print(f"请求错误：{e}")
                time.sleep(1)
        try:
            print(response.json())
        except ValueError:  # was a bare except; json decoding errors are ValueError subclasses
            print(response.text)

    def save_interface2(self):
        """Consumer loop: forever pull record batches from interface_que and POST them.

        The original duplicated save_interface's request/retry body verbatim;
        this delegates to save_interface so the logic lives in one place.
        """
        while True:
            temporary = self.interface_que.get()
            self.save_interface(temporary)

    def get_keyno_from_interface(self, name, code=""):
        """Resolve company keynos via the middleware API.

        name/code are comma-separated lists; code may be shorter than name
        (the default "" splits to a single empty entry). Returns the API's
        keyno_list dict, or None when the API returns no data or after 10
        failed attempts.
        """
        check_times = 0
        name_list = name.split(",")
        code_list = code.split(",")
        datas = {}
        for index_, name_ in enumerate(name_list):
            if not name_.strip():
                continue
            datas[f"comp[{index_ + 1}][uni_key]"] = index_ + 1
            datas[f"comp[{index_ + 1}][comp_name]"] = name_.strip()
            # Guard against fewer credit codes than names (was an unhandled IndexError).
            datas[f"comp[{index_ + 1}][credit_no]"] = code_list[index_] if index_ < len(code_list) else ""
        while True:
            try:
                insert_response_result = requests.post(url=get_keyno, headers=header, data=datas, verify=False)
                if insert_response_result.status_code != 200:
                    # Was a bare `raise` (RuntimeError: no active exception); raise explicitly.
                    raise ValueError(f"unexpected status {insert_response_result.status_code}")
                local_dict = insert_response_result.json()["data"]
                if not local_dict:
                    return None
                return local_dict['keyno_list']
            except Exception as err:
                time.sleep(1)
                check_times += 1
                if check_times < 10:
                    continue
                return None

    # Persist scraped rows into MySQL (consumer of mysql_que).
    def save_mysql(self, thead_num):
        """Drain mysql_que in batches of up to 200 rows and insert/update them.

        thead_num: expected producer-thread count; when the queue is empty and
        every producer has appended its sentinel to stop_list, this returns.
        Queue items are 5-tuples:
        (data_dict, beian_province_name, beian_type, source_name, unid).
        """
        mysqlobj = self.mysqlobj.connection()
        curr = mysqlobj.cursor()
        select_sql = f"select id from beian_data_new where unid = '%s'"
        insert_sql = f"insert into beian_data_new(data, comp_name, credit_no, comp_keyno, beian_province_name, beian_type, source_name, version, create_date, update_date, new_data, nokeyno, unid) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
        tries = 1
        numbers = 0
        stop_num = 0
        while True:
            if self.mysql_que.empty():
                # All producers done and nothing left queued -> exit.
                if thead_num == len(self.stop_list):
                    return
                time.sleep(2)
                tries += 1
                if tries >= 20:
                    break
                continue
            tries = 1
            temporary = []   # rows to bulk-insert this batch
            unid_list = []   # de-dup within the batch
            for _ in range(200):
                datas = self.mysql_que.get()
                if not datas:
                    # NOTE(review): `pass` does NOT skip a falsy item — the
                    # datas[4] below would raise on None. This looks like it
                    # should be `continue` (or `break` for a sentinel); left
                    # unchanged pending confirmation.
                    stop_num += 1
                    pass
                if datas[4] in unid_list:
                    continue
                comp_name = re.sub("\s", "", datas[0].get("comp_name", ""))
                credit_no = re.sub("\s", "", datas[0].get("credit_no", ""))
                keyno_list = self.get_keyno_from_interface(comp_name, credit_no)
                if keyno_list:
                    comp_keyno = keyno_list["1"] if keyno_list["1"] else ""
                else:
                    comp_keyno = ""
                create_date = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")
                update_date = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")
                while True:
                    select_result = conn_select(select_sql % (datas[4]), curr)
                    if select_result == "重连":
                        # Connection dropped: rebuild connection + cursor, retry.
                        try:
                            mysqlobj.close()
                            curr.close()
                        except:
                            pass
                        mysqlobj = self.mysqlobj.connection()
                        curr = mysqlobj.cursor()
                        time.sleep(1)
                        continue
                    elif not select_result:
                        # New unid: queue for bulk insert. nokeyno=1 when no valid 32-char keyno.
                        data_tuple = (re.sub("\s", "", json.dumps(datas[0])), comp_name, credit_no, comp_keyno, re.sub("\s", "", datas[1]), re.sub("\s", "", datas[2]), re.sub("\s", "", datas[3]), self.version, create_date, update_date, 1, 0 if len(comp_keyno) == 32 else 1, re.sub("\s", "", datas[4]))
                        temporary.append(data_tuple)
                        unid_list.append(datas[4])
                        numbers += 1
                    else:
                        # Existing row: refresh update_date, and the keyno fields when valid.
                        # NOTE(review): values are interpolated into SQL via f-strings —
                        # SQL-injection risk; should use parameterized queries.
                        if len(comp_keyno) != 32:
                            update_sql = f"update beian_data_new set update_date='{update_date}' where unid = '{datas[4]}'"
                        else:
                            update_sql = f"update beian_data_new set update_date='{update_date}', comp_name='{comp_name}', credit_no='{credit_no}', comp_keyno='{comp_keyno}', nokeyno=0, new_data=1 where unid = '{datas[4]}'"
                        conn_update(update_sql, curr, mysqlobj)
                        print(f"{datas[1], datas[3], datas[4]} 已存在")
                    break
            # Bulk-insert the accumulated new rows, retrying across reconnects.
            while True:
                if not temporary:
                    break
                insert_res = conn_insert_many(insert_sql, temporary, curr, mysqlobj)
                if insert_res == "重连":
                    try:
                        mysqlobj.close()
                        curr.close()
                    except:
                        pass
                    mysqlobj = self.mysqlobj.connection()
                    curr = mysqlobj.cursor()
                    time.sleep(1)
                    continue

                print(f"更新了 {numbers} 条数据")
                break

    def get_unid(self, *args):
        """Build a 32-char lowercase md5 hex id for de-duplicating records.

        A single leading dict arg is hashed from its json.dumps form as-is
        (remaining args ignored, matching the original); otherwise every arg
        is stringified, whitespace-stripped, joined, and hashed.

        (md5 hexdigest is already lowercase and dash-free, so the original's
        .replace("-", "").lower() post-processing was dead code.)
        """
        if isinstance(args[0], dict):
            payload = json.dumps(args[0])
        else:
            payload = "".join(re.sub(r"\s", "", str(every)) for every in args)
        return hashlib.md5(payload.encode(encoding="utf-8")).hexdigest()

    # Second-level request helper with retry.
    def get_cecond_lv_page(self, urls, header, method, param=None):
        """GET or POST `urls` with a rotating User-Agent, retrying up to ~19 times.

        method: "get" sends query params; anything else POSTs form data.
        Returns the Response on HTTP 200, or False once retries are exhausted.
        Mutates `header` in place (User-Agent is refreshed each attempt).
        """
        tries = 1
        while True:
            header["User-Agent"] = self.ua.random
            try:
                if method == "get":
                    if param:
                        response = requests.get(url=urls, headers=header, params=param, verify=False, timeout=20)
                    else:
                        response = requests.get(url=urls, headers=header, verify=False, timeout=20)
                else:
                    if param:
                        response = requests.post(url=urls, headers=header, data=param, verify=False, timeout=20)
                    else:
                        # BUG FIX: this branch previously issued requests.get even
                        # though the caller asked for a POST.
                        response = requests.post(url=urls, headers=header, verify=False, timeout=20)
                if response.status_code != 200:
                    # Was a bare `raise` with no active exception; raise explicitly.
                    raise ValueError(f"unexpected status {response.status_code}")
                return response
            except Exception:
                tries += 1
                if tries >= 20:
                    return False
                time.sleep(1)

    def select_data(self, sorc_plat):
        """Feed interface_que with batches of 20 companies for one source platform.

        Resumes from a checkpoint (id, pagenum) held in beian_data_new, then
        walks rows with id > checkpoint one at a time, packing them into the
        form-field dict expected by the middleware API. Runs forever.

        NOTE(review): column order assumed to be
        (id, data, beian_province_name, beian_type, source_name, version) —
        confirm against the table schema.
        """
        mysqlobj = self.mysqlobj.connection()
        curr = mysqlobj.cursor()
        # Load the checkpoint: the lowest-id row for this source doubles as progress marker.
        while True:
            select_sql = f"select id, pagenum from beian_data_new where source_name = '{sorc_plat}' ORDER BY id limit 1"
            select_result = conn_select(select_sql, curr)
            if select_result == "重连":
                # Connection dropped: rebuild connection + cursor and retry.
                try:
                    mysqlobj.close()
                    curr.close()
                except:
                    pass
                mysqlobj = self.mysqlobj.connection()
                curr = mysqlobj.cursor()
                continue
            elif select_result:
                num = int(select_result[0][1])
                ids = int(select_result[0][0])
            else:
                num = 0
                ids = 0
            break
        numbers = 1
        tries = 1
        temporary = {}
        while True:
            select_data_sql = f"select * from beian_data_new where id > {num} and source_name = '{sorc_plat}' limit 1"
            select_data_res = conn_select(select_data_sql, curr)
            if select_data_res == "重连":
                try:
                    mysqlobj.close()
                    curr.close()
                except:
                    pass
                mysqlobj = self.mysqlobj.connection()
                curr = mysqlobj.cursor()
                continue
            if select_data_res:
                data_tuple = select_data_res[0]
                if ids == 0:
                    ids = data_tuple[0]
                if numbers == 1:
                    # Batch-level fields come from the first row of the batch.
                    # NOTE(review): numbers resets to 0 (not 1) after a flush, so
                    # subsequent batches set these on their *second* row — verify intended.
                    temporary[f"version"] = data_tuple[5]
                    temporary[f"beian_province_name"] = data_tuple[2]
                    temporary[f"source_name"] = data_tuple[4]
                    # temporary[f"source_name"] = "来源平台"
                data_dict = json.loads(data_tuple[1])
                temporary[f"beian_comp[{numbers}][beian_type]"] = data_tuple[3]
                temporary[f"beian_comp[{numbers}][comp_name]"] = data_dict["comp_name"]
                if data_dict.get("credit_no", "").strip():
                    temporary[f"beian_comp[{numbers}][credit_no]"] = data_dict["credit_no"].strip()
                if data_dict.get("oper_name", "").strip():
                    temporary[f"beian_comp[{numbers}][oper_name]"] = data_dict["oper_name"].strip()
                numbers += 1
                if numbers == 21:
                    # 20 companies collected: ship the batch and persist the checkpoint.
                    self.interface_que.put(temporary)
                    numbers = 0
                    temporary = {}
                    insert_sql = f"insert into beian_data_new(id, pagenum) values({ids}, {num}) ON DUPLICATE KEY UPDATE id={ids},pagenum={num}"
                    insert_resu = conn_insert(insert_sql, "", curr, mysqlobj)
                    if insert_resu == "重连":
                        try:
                            mysqlobj.close()
                            curr.close()
                        except:
                            pass
                        mysqlobj = self.mysqlobj.connection()
                        curr = mysqlobj.cursor()
                num = data_tuple[0]
            else:
                # No new rows: back off; after ~10 idle polls flush the partial
                # batch and sleep 10 minutes.
                if numbers == 1 and tries == 1:
                    time.sleep(10)
                else:
                    time.sleep(6)
                    tries += 1
                    if tries >= 10:
                        self.interface_que.put(temporary)
                        numbers = 0
                        temporary = {}
                        tries = 1
                        time.sleep(600)

    # Single-table variant: stream whole rows into interface_que.
    def select_data2(self):
        """Stream beian_data_new rows into interface_que in blocks of 2000.

        Progress lives in the row with id = 2: its pagenum column holds the
        current block index. Sleeps 10 minutes when no rows are found.

        NOTE(review): the checkpoint is written as num-1 before num is
        incremented, i.e. it records the *previous* block — confirm the
        restart semantics are intended.
        """
        mysqlobj = self.mysqlobj.connection()
        curr = mysqlobj.cursor()
        read_num = 2000
        while True:
            select_sql = "select pagenum from beian_data_new where id = 2"
            select_result = conn_select(select_sql, curr)
            if select_result == "重连":
                # Connection dropped: rebuild connection + cursor and retry.
                try:
                    mysqlobj.close()
                    curr.close()
                except:
                    pass
                mysqlobj = self.mysqlobj.connection()
                curr = mysqlobj.cursor()
                continue
            elif select_result:
                num = int(select_result[0][0])
            else:
                num = 0
            # Fetch block num: ids in [num*2000+1, num*2000+2000].
            select_data_sql = f"select * from beian_data_new where id between {num*read_num+1} and {num*read_num+read_num}"
            select_data_res = conn_select(select_data_sql, curr)
            if select_data_res == "重连":
                try:
                    mysqlobj.close()
                    curr.close()
                except:
                    pass
                mysqlobj = self.mysqlobj.connection()
                curr = mysqlobj.cursor()
                continue
            if select_data_res:
                for datas in select_data_res:
                    # Columns 1..5 assumed: data, province, type, source, unid — TODO confirm schema.
                    self.interface_que.put((datas[1], datas[2], datas[3], datas[4], datas[5]))
                if num != 0:
                    insert_sql = f"insert into beian_data_new(id, pagenum) values(2, {num-1}) ON DUPLICATE KEY UPDATE id=2,pagenum={num-1}"
                    insert_resu = conn_insert(insert_sql, "", curr, mysqlobj)
                    if insert_resu == "重连":
                        try:
                            mysqlobj.close()
                            curr.close()
                        except:
                            pass
                        mysqlobj = self.mysqlobj.connection()
                        curr = mysqlobj.cursor()
                        continue
                num += 1
            else:
                time.sleep(600)

    def str_change_pwd(self, data_str):
        """Encode a string as %uXXXX escapes (uppercase hex, no zero-padding).

        Inverse of pwd_change_str for BMP characters. join() replaces the
        original's quadratic `+=` loop; `str(hex(...))` was redundant since
        hex() already returns str.
        """
        return "".join("%u" + format(ord(ch), "X") for ch in data_str)

    def pwd_change_str(self, data_str):
        """Decode %uXXXX / %uXX escapes (as produced by str_change_pwd) to text."""
        pattern = re.compile(r'%u([a-fA-F0-9]{4}|[a-fA-F0-9]{2})')

        def _unescape(match):
            return chr(int(match.group(1), 16))

        return pattern.sub(_unescape, data_str)

    # Beijing — registration (beian) records for out-of-province firms.
    def run_BJS(self):
        """Scrape Beijing MOHURD's out-of-province firm registration list.

        Pages through the search form, follows each result's detail link,
        extracts company fields, and pushes rows onto mysql_que. Appends a
        sentinel to stop_list when finished.
        """
        print("北京市启动")
        pagesize = 15
        pagenum = 1
        while True:
            urls = "http://bjjs.zjw.beijing.gov.cn/eportal/ui?pageId=308947"        # out-of-province list page
            header = {
                "Content-Type": "application/x-www-form-urlencoded",
                "Host": "bjjs.zjw.beijing.gov.cn",
                "Origin": "http://bjjs.zjw.beijing.gov.cn",
                "Referer": "http://bjjs.zjw.beijing.gov.cn/eportal/ui?pageId=308947",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                "Upgrade-Insecure-Requests": "1"
            }
            # Empty filters = full listing; only paging parameters vary.
            datas = {
                "filter_LIKE_QYMC": "",
                "filter_LIKE_YYZZZCH": "",
                "filter_LIKE_ZSBH": "",
                "currentPage": pagenum,
                "pageSize": pagesize,
                "OrderByField": "",
                "OrderByDesc": ""
            }
            response = self.get_cecond_lv_page(urls, header, "post", datas)
            if not response:
                break
            if response.text == "":
                break
            response_html = etree.HTML(response.content.decode(response.apparent_encoding))
            data_url_list = response_html.xpath('//div[@class="content1"]/table[2]//td/a/@href')
            if not data_url_list:
                break
            # NOTE(review): the Host value below contains trailing spaces — verify intentional.
            second_header = {
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                "Accept-Encoding": "gzip, deflate",
                "Host": "bjjs.zjw.beijing.gov.cn    ",
                "Pragma": "no-cache",
                "Referer": "http://bjjs.zjw.beijing.gov.cn/eportal/ui?pageId=308947",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                "Upgrade-Insecure-Requests": "1"
            }
            # Each result links to a detail page holding the full record.
            for second_url in data_url_list:
                temporary = {}
                detail_url = "http://bjjs.zjw.beijing.gov.cn" + re.sub("\s", "", second_url)
                second_resp = self.get_cecond_lv_page(detail_url, second_header, "get")
                if not second_resp:
                    continue
                second_resp_html = etree.HTML(second_resp.content.decode(second_resp.apparent_encoding))
                comp_name = second_resp_html.xpath('//table[@class="detailview"]//tr[1]/td[2]/text()')
                if not comp_name:
                    continue
                # Skip junk rows with implausibly short names.
                if len(comp_name[0]) <= 3:
                    continue
                temporary["comp_name"] = re.sub("\s", "", comp_name[0])
                temporary["detail_url"] = detail_url
                comp_addr = second_resp_html.xpath('//table[@class="detailview"]//tr[2]/td[2]/text()')
                if comp_addr:
                    temporary["comp_addr"] = re.sub("\s", "", comp_addr[0])
                credit_no = second_resp_html.xpath('//table[@class="detailview"]//tr/td[@id="CodeDetailSG"]/text()')
                if credit_no:
                    temporary["credit_no"] = re.sub("\s", "", credit_no[0])
                comp_type = second_resp_html.xpath('//table[@class="detailview"]//tr[5]/td[2]/text()')
                if comp_type:
                    temporary["comp_type"] = re.sub("\s", "", comp_type[0])
                oper_name = second_resp_html.xpath('//table[@class="detailview"]//tr[6]/td[2]/text()')
                if oper_name:
                    temporary["oper_name"] = re.sub("\s", "", oper_name[0])
                comp_zzcode = second_resp_html.xpath('//table[@class="detailview"]//tr[7]/td[2]/text()')
                if comp_zzcode:
                    temporary["comp_zzcode"] = re.sub("\s", "", comp_zzcode[0])
                comp_zzlv = second_resp_html.xpath('//table[@class="detailview"]//tr[8]/td[2]/text()')
                if comp_zzlv:
                    temporary["comp_zzlv"] = re.sub("\s", "", comp_zzlv[0])
                comp_beian_date = second_resp_html.xpath('//table[@class="detailview"]//tr[9]/td[2]/text()')
                if comp_beian_date:
                    temporary["comp_beian_date"] = re.sub("\s", "", comp_beian_date[0])
                beian_province_name = "北京市"
                beian_type = "外省"
                source_name = "北京市住房和城乡建设委员会"
                # print(temporary, beian_type, pagenum)
                unidd = self.get_unid(re.sub("\s", "", temporary.get("comp_name", "")), re.sub("\s", "", temporary.get("credit_no", "")), re.sub("\s", "", temporary.get("oper_name", "")), beian_province_name, beian_type, source_name)
                self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
            pagenum += 1
            time.sleep(0.5)
            # A short page means we've reached the last page.
            if len(data_url_list) < pagesize:
                break
        self.stop_list.append(None)
        print("北京市启动结束")

    def run_BJS2(self):
        """Scrape Beijing MOHURD's out-of-province supervision-firm list.

        Unlike run_BJS, all fields come straight from the listing table
        (no detail-page fetch). Rows go onto mysql_que; a sentinel is
        appended to stop_list on exit.
        """
        pagesize = 15      # adjustable; site default is 15
        pagenum = 1      # starting page
        while True:
            urls = "http://bjjs.zjw.beijing.gov.cn/eportal/ui?pageId=406547"      # out-of-province supervision list
            header = {
                "Content-Type": "application/x-www-form-urlencoded",
                "Host": "bjjs.zjw.beijing.gov.cn",
                "Origin": "http://bjjs.zjw.beijing.gov.cn",
                "Referer": "http://bjjs.zjw.beijing.gov.cn/eportal/ui?pageId=308947",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                "Upgrade-Insecure-Requests": "1"
            }
            datas = {
                "filter_LIKE_QYMC": "",
                "filter_LIKE_YYZZZCH": "",
                "filter_LIKE_ZSBH": "",
                "currentPage": pagenum,
                "pageSize": pagesize,
                "OrderByField": "",
                "OrderByDesc": ""
            }
            response = self.get_cecond_lv_page(urls, header, "post", datas)
            if not response:
                break
            if response.text == "":
                break
            response_html = etree.HTML(response.content.decode(response.apparent_encoding))
            data_list = response_html.xpath('//form[@id="CommonSearchResult"]//table[@class="gridview"]//tr')
            if not data_list:
                break
            # First <tr> is the header row; data starts at index 1.
            for data_html in data_list[1:]:
                temporary = {}
                comp_name = data_html.xpath('./td[2]/text()')
                if not comp_name:
                    continue
                if len(comp_name[0]) <= 3:
                    continue
                temporary["comp_name"] = comp_name[0]
                # NOTE(review): unguarded [0] indexing below raises IndexError on
                # rows missing these cells — confirm the table is always complete.
                temporary["credit_no"] = data_html.xpath('./td[3]/text()')[0]
                temporary["comp_zzcode"] = data_html.xpath('./td[4]/text()')[0]
                detail_url = data_html.xpath('./td[5]/a/@href')[0]
                temporary["detail_url"] = "http://bjjs.zjw.beijing.gov.cn" + detail_url.strip()
                beian_province_name = "北京市"
                beian_type = "外省"
                source_name = "北京市住房和城乡建设委员会"
                # oper_name is never set in this method, so get_unid receives "" for it.
                unidd = self.get_unid(re.sub("\s", "", temporary.get("comp_name", "")), re.sub("\s", "", temporary.get("credit_no", "")), re.sub("\s", "", temporary.get("oper_name", "")), beian_province_name, beian_type, source_name)
                self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
            pagenum += 1
            time.sleep(0.5)
            if len(data_list[1:]) < pagesize:
                break
        self.stop_list.append(None)

    # Shanxi — out-of-province firms; no unified credit code, but has an org code.
    def run_SXS(self):
        """Scrape Shanxi's out-of-province firm list (with detail-page org codes).

        Pages through the AJAX listing, pulls the org code from each detail
        page, and pushes rows onto mysql_que. Sentinel appended on exit.
        """
        print("山西省启动")
        pagenum = 1
        while True:
            urls = "https://zjt.shanxi.gov.cn/wsrj/outsidetheprovince/listAjax?ajax=true"
            header = {
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                "Host": "zjt.shanxi.gov.cn",
                "Origin": "https://zjt.shanxi.gov.cn",
                "Referer": "https://zjt.shanxi.gov.cn/wsrj/outsidetheprovince/outsidetheprovinceList.html",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                "X-Requested-With": "XMLHttpRequest"
            }
            datas = {"pagenum": pagenum, "resourcekey": ""}
            response = self.get_cecond_lv_page(urls, header, "post", datas)
            if not response:
                break
            if response.text == "":
                break
            # Heuristic end-of-data check: past page 1, seeing this known
            # first-page company again means the server looped back to page 1.
            if pagenum >= 2 and "中铁五局集团第一工程有限责任公司" in response.text:
                break
            response_html = etree.HTML(response.content.decode(response.apparent_encoding))
            data_list = response_html.xpath('//table[@id="tabList"]/tbody/tr')
            if not data_list:
                break
            for data_html in data_list:
                temporary = {}
                # NOTE(review): unguarded [0] indexing raises IndexError on
                # malformed rows — confirm the table is always complete.
                temporary["comp_name"] = data_html.xpath('./td[1]/text()')[0]
                if len(temporary.get("comp_name")) <= 3:
                    continue
                temporary["comp_type"] = data_html.xpath('./td[2]/text()')[0]
                temporary["comp_addr"] = data_html.xpath('./td[3]/text()')[0]
                temporary["comp_stay_addr"] = data_html.xpath('./td[4]/text()')[0]
                detail_url = data_html.xpath('./td[5]/a/@href')[0]
                temporary["detail_url"] = "https://zjt.shanxi.gov.cn" + detail_url.strip()
                second_resp = self.get_cecond_lv_page(temporary["detail_url"], header, "get")
                if not second_resp:
                    continue
                second_resp_html = etree.HTML(second_resp.content.decode(second_resp.apparent_encoding))
                org_no = second_resp_html.xpath('//table[@class="table2"]//tr[3]/td[2]/text()')
                if org_no:
                    temporary["org_no"] = org_no[0]
                beian_province_name = "山西省"
                beian_type = "外省"
                source_name = "山西省智慧建筑管理服务信息平台"
                # print(temporary, beian_type, pagenum)
                # credit_no is never set for this source, so get_unid receives "" for it.
                unidd = self.get_unid(re.sub("\s", "", temporary.get("comp_name", "")), re.sub("\s", "", temporary.get("credit_no", "")), re.sub("\s", "", temporary.get("oper_name", "")), beian_province_name, beian_type, source_name)
                self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
            pagenum += 1
            time.sleep(0.5)
            # Page size appears to be 20 — a short page means the last page.
            if len(data_list) < 20:
                break
        self.stop_list.append(None)
        print("山西省启动结束")

    # Shanxi — in-province firms (paged JSON endpoint).
    def run_SXS_inner(self):
        """Scrape Shanxi's in-province firm list, 10 rows per page.

        The endpoint returns JSON; each usable row is pushed onto mysql_que.
        Sentinel appended to stop_list on exit.
        """
        print("山西省启动")
        pagenum = 1
        while True:
            urls = "https://zjt.shanxi.gov.cn/SXJGPublic/Backstage/New_Backstage.ashx?Metoad=Enterprise_List_Fun"
            header = {
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                "Host": "zjt.shanxi.gov.cn",
                "Origin": "https://zjt.shanxi.gov.cn",
                "Referer": "https://zjt.shanxi.gov.cn/SXJGPublic/HTML/Enterprise_List",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                "X-Requested-With": "XMLHttpRequest"
            }
            # Paging is expressed as a 1-based row range: [start, end].
            datas = {
                "LikeQuery": "",
                "CORPNAME": "",
                "CityNum": "请选择",
                "CertTypeNum": "请选择",
                "LegalMan": "",
                "pageclickednumber_1": (pagenum-1)*10+1,
                "pageclickednumber": 10*pagenum,
                "CorpCode": "",
                "Select_Province": "",
            }
            response = self.get_cecond_lv_page(urls, header, "post", datas)
            if not response:
                break
            if response.text == "":
                break
            data_list = response.json()
            if not data_list:
                break
            for data_html in data_list:
                temporary = {}
                temporary["comp_name"] = data_html.get("CorpName")
                # Skip junk rows with implausibly short names.
                if len(temporary.get("comp_name")) <= 3:
                    continue
                temporary["credit_no"] = data_html.get("CorpCode")
                temporary["oper_name"] = data_html.get("LEGALMAN")
                temporary["econ_kind"] = data_html.get("ECONTYPENAME")
                temporary["comp_city"] = data_html.get("City")
                temporary["register"] = "已登记" if data_html["ISCORPNAME"] == 1 else "未登记"
                beian_province_name = "山西省"
                beian_type = "本省"
                source_name = "山西省智慧建筑管理服务信息平台"
                # print(temporary, pagenum)
                unidd = self.get_unid(re.sub("\s", "", temporary.get("comp_name", "")), re.sub("\s", "", temporary.get("credit_no", "")), re.sub("\s", "", temporary.get("oper_name", "")), beian_province_name, beian_type, source_name)
                self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
            pagenum += 1
            # A short page (fewer than 10 rows) means the last page.
            if len(data_list) < 10:
                break
            time.sleep(0.5)
        self.stop_list.append(None)
        print("山西省内启动结束")

    # Shanxi — in-province firms, single oversized-page variant.
    def run_SXS3(self):
        """Fetch the whole in-province Shanxi firm list in one request.

        The endpoint's page size is normally 10; requesting rows 1..200000
        pulls everything at once. Usable rows go onto mysql_que; exactly one
        sentinel is appended to stop_list on every exit path (the original
        had a redundant `len(data_list) < pagesize` branch that appended the
        same sentinel the fall-through already did).
        """
        pagesize = 200000
        urls = "https://zjt.shanxi.gov.cn/SXJGPublic/Backstage/New_Backstage.ashx?Metoad=Enterprise_List_Fun"
        # NOTE(review): "AContent-Type" looks like a typo for "Content-Type";
        # harmless because requests sets form Content-Type itself — kept as-is.
        header = {
            "Accept": "*/*",
            "AContent-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Referer": "https://zjt.shanxi.gov.cn/SXJGPublic/HTML/Enterprise_List",
            "Host": "zjt.shanxi.gov.cn",
            "Origin": "https://zjt.shanxi.gov.cn"
        }
        datas = {
            "LikeQuery": "",
            "CORPNAME": "",
            "CityNum": "",
            "LegalMan": "",
            "pageclickednumber_1": 1,
            "pageclickednumber": pagesize,
            "CorpCode": "",
        }
        response = self.get_cecond_lv_page(urls, header, "post", datas)
        if not response:
            self.stop_list.append(None)
            return
        data_list = response.json()
        if not data_list:
            self.stop_list.append(None)
            return
        for every_data in data_list:
            temporary = {}
            comp_name = every_data.get("CorpName", "")
            # Skip junk rows with missing or implausibly short names.
            if not comp_name or len(comp_name) <= 3:
                continue
            temporary["comp_name"] = comp_name
            credit_no = every_data.get("CorpCode", "")
            if credit_no:
                temporary["credit_no"] = credit_no
            oper_name = every_data.get("LEGALMAN", "")
            if oper_name:
                temporary["oper_name"] = oper_name
            beian_province_name = "山西省"
            beian_type = "本省"
            source_name = "山西省智慧建筑管理服务信息平台"
            unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")), re.sub(r"\s", "", temporary.get("credit_no", "")), re.sub(r"\s", "", temporary.get("oper_name", "")), beian_province_name, beian_type, source_name)
            self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
        self.stop_list.append(None)

    # 河北省
    def run_HeBS(self):
        """Crawl the Hebei housing/construction company registry per type.

        Iterates every certificate type in ``type_dict``, paging through the
        open JSON API and pushing (record, province, beian_type, source, unid)
        tuples onto ``self.mysql_que``.  Appends ``None`` to ``self.stop_list``
        when finished so the consumer thread can count completed producers.
        """
        print("河北省启动")
        # pagesize 参数可变，默认10 — the server accepts an oversized pageSize,
        # so one request per type usually returns everything.
        type_dict = {
            "建筑业企业": 3,
            "房地产开发企业": 13,
            "物业服务企业": 15,
            "房地产经纪机构": 14,
            "工程勘察企业": 1,
            "工程设计企业": 2,
            "工程监理企业": 4,
            "工程造价咨询单位": 7,
            "工程招标代理机构": 5,
            "建设工程质量检测机构": 10,
            "供热企业": 16,
            "燃气经营企业": 17,
            "施工图审查机构": 9,
        }
        pageSize = 200000
        # BUG FIX: keep the template separate from the formatted URL.  The old
        # code did ``urls = urls.format(...)``, destroying the "{}" placeholders
        # after the first request, so every later page/type re-requested the
        # exact same URL.
        url_template = "http://110.249.221.5:8005/api/QyController/list?currentPage={}&pageSize={}&certtypenum={}&nameOrCode="
        header = {
            "Accept": "application/json, text/plain, */*",
            "Host": "110.249.221.5:8005",
            "Referer": "http://110.249.221.5:8005/"
        }
        for keys, values in type_dict.items():
            pagenum = 1
            while True:
                urls = url_template.format(pagenum, pageSize, values)
                response = self.get_cecond_lv_page(urls, header, "get")
                if not response:
                    break
                data_list = response.json()
                if not data_list:
                    break
                # Guard a missing/null "rows" field instead of raising KeyError.
                rows = data_list.get("rows") or []
                for every_data in rows:
                    temporary = {}
                    comp_name = every_data.get("CORPNAME", "")
                    if not comp_name or len(comp_name) <= 3:
                        continue
                    temporary["comp_name"] = comp_name
                    credit_no = every_data.get("UNI_SC_ID", "")
                    if credit_no:
                        temporary["credit_no"] = credit_no
                    oper_name = every_data.get("PERSON_NAME", "")
                    if oper_name:
                        temporary["oper_name"] = oper_name
                    date_url_id = every_data.get("CORPCODE", "")
                    if date_url_id:
                        # BUG FIX: was mistakenly assigned ``oper_name``.
                        temporary["date_url_id"] = date_url_id
                    temporary["comp_zz"] = keys
                    beian_province_name = "河北省"
                    beian_type = "本省"
                    source_name = "河北省住房和城乡建设厅"
                    unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")), re.sub(r"\s", "", temporary.get("credit_no", "")), re.sub(r"\s", "", temporary.get("oper_name", "")), beian_province_name, beian_type, source_name)
                    self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                pagenum += 1
                time.sleep(0.5)
                if len(rows) < pageSize:
                    break
        self.stop_list.append(None)
        print("河北省结束")

    # 内蒙古 -- 卡 (site is slow / frequently stalls)
    def run_NMG(self):
        """Crawl the Inner Mongolia construction-market platform company list.

        Pages through an ASHX handler that returns an HTML table fragment in
        the ``tb`` field of a JSON body, parses each <tr> and queues records
        onto ``self.mysql_que``.  Stops at the first short page (< 20 rows).
        """
        print("内蒙古启动")
        pagenum = 1
        while True:
            times = int(time.time() * 1000)
            urls = "http://110.16.70.26/nmjgpublisher/handle/ProjectsInfoHandler.ashx?type=CorpInfo&lblPageCount=0&lblPageIndex={}&lblRowsCount=0&lblPageSize=20&SFZBDL=&CorpName=&Zzlx=&CertNum=&City=&_={}".format(pagenum, times)
            header = {
                "Host": "110.16.70.26",
                "X-Requested-With": "XMLHttpRequest",
                "Referer": "http://110.16.70.26/nmjgpublisher/corpinfo/CorpInfoObtain.aspx",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
            }
            # The site expects slightly different form bodies for the first
            # page and for subsequent pages (the page index travels in the URL).
            if pagenum == 1:
                datas = {
                    "type": "CorpInfo",
                    "lblPageCount": 0,
                    "lblPageIndex": 1,
                    "lblRowsCount": 0,
                    "lblPageSize": 20,
                    "SFZBDL": "",
                    "CorpName": "",
                    "Zzlx": "",
                    "CertNum": "",
                    "City": "",
                    "_": times}
            else:
                datas = {
                    "type": "CorpInfo",
                    "lblPageCount": 45,
                    "lblPageIndex": 1,
                    "lblPageSize": 20,
                    "pageSize": 15,
                    "SFZBDL": "",
                    "Zzlx": "",
                    "CertNum": "",
                    "City": "",
                    "_": times,
                    "CorpName": ""}
            response = self.get_cecond_lv_page(urls, header, "post", datas)
            if not response:
                break
            if response.text == "":
                break
            pagenum += 1
            time.sleep(0.5)
            try:
                response_html = etree.HTML(response.json()["tb"])
            except:
                time.sleep(1)
                continue
            if not response_html:
                break
            try:
                data_list = response_html.xpath("//tr")
            except:
                time.sleep(1)
                continue
            if not data_list:
                break
            for data_html in data_list:
                temporary = {}
                comp_name = data_html.xpath("./td[2]/a/text()")
                if not comp_name:
                    # BUG FIX: rows without a company name used to raise
                    # KeyError below while building detail_url; skip them.
                    continue
                temporary["comp_name"] = comp_name[0]
                if len(temporary["comp_name"]) <= 3:
                    continue
                credit_no = data_html.xpath("./td[3]/text()")
                if credit_no:
                    temporary["credit_no"] = credit_no[0]
                oper_name = data_html.xpath("./td[5]/text()")
                if oper_name:
                    temporary["oper_name"] = oper_name[0]
                econ_kind = data_html.xpath("./td[6]/text()")
                if econ_kind:
                    temporary["econ_kind"] = econ_kind[0]
                comp_city = data_html.xpath("./td[4]/text()")
                if comp_city:
                    temporary["comp_city"] = comp_city[0]
                # BUG FIX: .get() so a missing credit_no no longer raises
                # KeyError; the detail URL is then keyed on the name alone.
                temporary["detail_url"] = "http://110.16.70.26/nmjgpublisher/corpinfo/CorpDetailInfoObtain.aspx?CorpCode={}&CorpName={}&VType=1".format(temporary.get("credit_no", ""), self.str_change_pwd(temporary["comp_name"]))
                beian_province_name = "内蒙古自治区"
                beian_type = "本省"
                source_name = "内蒙古自治区建筑市场监管与诚信信息平台"
                unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")), re.sub(r"\s", "", temporary.get("credit_no", "")), re.sub(r"\s", "", temporary.get("oper_name", "")), beian_province_name, beian_type, source_name)
                self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
            if len(data_list) < 20:
                break
        self.stop_list.append(None)
        print("内蒙古启动结束")

    # 辽宁省
    def _parse_lns_row(self, data_html, types, header, get_second):
        """Parse one <tr> of a Liaoning listing table.

        ``types == 1`` -> in-province table (div_Province);
        ``types == 2`` -> out-of-province table (div_outCast).
        ``get_second`` says whether fetching the per-company detail page is
        still worth trying; it is switched off after the first failed detail
        request so a dead detail endpoint does not stall the whole crawl.

        Returns (record dict, get_second); the record is ``None`` when the
        row must be skipped.
        """
        temporary = {}
        if types == 1:
            comp_name = data_html.xpath("./td[3]/a/text()")
            if not comp_name or len(comp_name[0]) <= 3:
                return None, get_second
            temporary["comp_name"] = comp_name[0]
            credit_no = data_html.xpath("./td[2]/text()")
            if credit_no:
                temporary["credit_no"] = credit_no[0].strip()
            oper_name = data_html.xpath("./td[4]/text()")
            if oper_name:
                temporary["oper_name"] = oper_name[0].strip()
            # BUG FIX: use .get — a row with an empty credit-code cell used to
            # raise KeyError while formatting this URL.
            temporary["detail_url"] = f"http://218.60.144.163/LNJGPublisher/Corpinfo/CorpDetailInfo.aspx?rowGuid={temporary.get('credit_no', '')}&CorpCode={temporary.get('credit_no', '')}&CorpName={quote(temporary['comp_name'])}"
            return temporary, get_second
        # types == 2: out-of-province listing.
        comp_name = data_html.xpath('./td[2]/text()')
        if not comp_name or len(comp_name[0].strip()) <= 3:
            return None, get_second
        if "测试单位" in comp_name[0].strip():
            return None, get_second
        detail_url = data_html.xpath("./td[6]/a/@onclick")
        province = data_html.xpath("./td[4]/text()")
        # BUG FIX: guard the [0] — an empty td[4] used to raise IndexError.
        temporary["privice"] = province[0].strip() if province else ""
        if not detail_url:
            return None, get_second
        detail_url = re.findall("'(.*?)'", detail_url[0])
        if not detail_url:
            return None, get_second
        if re.sub(r"\s", "", detail_url[0]) == "":
            return None, get_second
        temporary["detail_url"] = "http://218.60.144.163/LNJGPublisher/corpinfo/outCaseCorpDetailInfo.aspx?Fid=%s" % (detail_url[0])
        if get_second:
            secod_response = self.get_cecond_lv_page(temporary["detail_url"], header, "get")
        else:
            secod_response = False
        if not secod_response:
            # Detail page unreachable: fall back to the listing columns and
            # stop trying detail pages for the remaining rows of this page.
            get_second = False
            credit_no = data_html.xpath('./td[3]/text()')
            if credit_no:
                temporary["credit_no"] = credit_no[0].strip()
            temporary["comp_name"] = comp_name[0].strip()
            oper_name = data_html.xpath('./td[5]/text()')
            if oper_name:
                temporary["oper_name"] = oper_name[0].strip()
        else:
            secod_response_html = etree.HTML(secod_response.content.decode(secod_response.apparent_encoding))
            temporary["credit_no"] = secod_response_html.xpath('//td[@id="CorpCode"]/text()')[0].strip()
            temporary["comp_name"] = secod_response_html.xpath('//td[@class="name_level3"]/text()')[0].strip()
            temporary["oper_name"] = secod_response_html.xpath('//td[@id="LinkMan"]/text()')[0].strip()
        return temporary, get_second

    def run_LNS(self):
        """Crawl the Liaoning construction-market platform (ASP.NET WebForms).

        Pagination works through __VIEWSTATE/__EVENTVALIDATION post-backs:
        page 1 posts the tokens scraped from the landing page, later pages
        re-submit the tokens carried back by the previous response.
        ``types`` 1/2 select the in-province / out-of-province tables.
        Retries itself from scratch when the landing page or its tokens
        cannot be fetched.
        """
        print("辽宁省启动")
        url = "http://218.60.144.163/LNJGPublisher/corpinfo/CorpInfo.aspx"
        data = {
            "__EVENTTARGET": "",
            "__EVENTARGUMENT": "",
            "__VIEWSTATE": "",
            "__EVENTVALIDATION": "",
        }
        header = {
            "Host": "218.60.144.163",
            "Content-Type": "application/x-www-form-urlencoded",
            "Origin": "http://218.60.144.163",
            "Referer": "http://218.60.144.163/LNJGPublisher/Corpinfo/CorpInfo.aspx",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
        }
        resonse_start = self.get_cecond_lv_page(url, header, "get")
        if not resonse_start:
            # BUG FIX: an unconditional ``return`` made the retry below dead
            # code and ended the thread without touching stop_list (the
            # consumer could wait forever); retry exactly like the
            # token-extraction failure path does.
            time.sleep(1)
            self.run_LNS()
            return
        resonse_start_html = etree.HTML(resonse_start.content.decode(resonse_start.apparent_encoding))
        try:
            VIEWSTATE = resonse_start_html.xpath('//input[@id="__VIEWSTATE"]/@value')[0]
            EVENTVALIDATION = resonse_start_html.xpath('//input[@id="__EVENTVALIDATION"]/@value')[0]
        except:
            time.sleep(1)
            self.run_LNS()
            return
        for types in [1, 2]:
            pagenum = 1
            data["hidd_type"] = types
            datas = "__EVENTTARGET=&__EVENTARGUMENT=&__VIEWSTATE={}&__EVENTVALIDATION={}&hidd_type={}&txtCorpName=&txtFOrgCode=&btnSearch=&txtCertNum=&newpage=&newpage1=".format(
                quote(VIEWSTATE, safe=""), quote(EVENTVALIDATION, safe=""), types)
            if types == 1:
                xpath = '//div[@id="div_Province"]//table[@class="company_list table_list"]/tbody/tr'
            else:
                xpath = '//div[@id="div_outCast"]//table[@class="company_list table_list"]/tbody/tr'
            while True:
                if pagenum == 1:
                    data["__EVENTTARGET"] = "Linkbutton3" if types == 1 else "Linkbutton8"
                    body = datas
                else:
                    body = "__EVENTTARGET={}&__EVENTARGUMENT=&__VIEWSTATE={}&__EVENTVALIDATION={}&hidd_type={}&txtCorpName=&txtFOrgCode=&txtCertNum=&newpage={}&newpage1=".format(
                        data["__EVENTTARGET"], quote(data["__VIEWSTATE"], safe=''), quote(data["__EVENTVALIDATION"], safe=''), types, pagenum - 1)
                response = self.get_cecond_lv_page(url, header, "post", body)
                if not response:
                    break
                if response.text == "":
                    break
                response_html = etree.HTML(response.content.decode(response.apparent_encoding).replace("\r\n", "").replace("\n", ""))
                # Carry the post-back tokens of THIS response into the next
                # page request; retry the same page when they are missing.
                try:
                    data["__VIEWSTATE"] = response_html.xpath('//input[@id="__VIEWSTATE"]/@value')[0]
                    data["__EVENTVALIDATION"] = response_html.xpath('//input[@id="__EVENTVALIDATION"]/@value')[0]
                except:
                    time.sleep(1)
                    continue
                data_list = response_html.xpath(xpath)
                get_second = True
                for data_html in data_list:
                    temporary, get_second = self._parse_lns_row(data_html, types, header, get_second)
                    if temporary is None:
                        continue
                    beian_province_name = "辽宁省"
                    beian_type = "本省" if types == 1 else "外省"
                    source_name = "辽宁省建筑市场公共服务平台"
                    unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")), re.sub(r"\s", "", temporary.get("credit_no", "")), re.sub(r"\s", "", temporary.get("oper_name", "")), beian_province_name, beian_type, source_name)
                    self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                pagenum += 1
                if len(data_list) < 20:
                    break
                time.sleep(0.5)
        self.stop_list.append(None)
        print("辽宁省启动结束")

    # 吉林省
    def run_JLS(self):
        """Crawl the Jilin construction-market platform.

        Two listing endpoints are walked in turn (SnCorpData = in-province,
        SwCorpData = out-of-province).  Each listing page returns an HTML
        fragment in the ``tb`` JSON field; every row link is followed to the
        detail page, from which name / credit code / legal person are read
        and queued onto ``self.mysql_que``.
        """
        print("吉林省启动")
        list_templates = [
            "http://cx.jlsjsxxw.com/handle/NewHandler.ashx?method=SnCorpData&CorpName=&QualiType=&TradeID=&BoundID=&LevelID=&CityNum=&nPageIndex={}&nPageCount={}&nPageRowsCount={}&nPageSize={}&_={}",
            "http://cx.jlsjsxxw.com/handle/NewHandler.ashx?method=SwCorpData&CorpName=&AptitudeNum=&TradeID=&BoundID=&LevelID=&ProvinceNum=&nPageIndex={}&nPageCount={}&nPageRowsCount={}&nPageSize={}&_={}",
        ]
        for template in list_templates:
            stamp = int(time.time() * 1000) - 1
            page_index = 1
            page_size = 20
            page_count = 0
            page_rows = 0
            req_header = {
                "Accept": "application/json, text/javascript, */*; q=0.01",
                "Accept-Encoding": "gzip, deflate",
                "Accept-Language": "zh-CN,zh;q=0.9",
                "Host": "cx.jlsjsxxw.com",
                "Referer": "http://cx.jlsjsxxw.com/corpinfo/CorpInfo.aspx",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.54 Safari/537.36",
            }
            while True:
                page_url = template.format(page_index, page_count, page_rows, page_size, stamp)
                listing = self.get_cecond_lv_page(page_url, req_header, "get")
                if not listing or listing.text == "":
                    break
                listing_json = listing.json()

                # The server echoes paging counters; feed them back next round.
                page_count = listing_json.get("nPageCount", 0)
                page_rows = listing_json.get("nPageRowsCount", 0)
                table_html = etree.HTML(listing_json["tb"])
                detail_links = table_html.xpath('//tr/td/a/@href')
                if not detail_links:
                    break
                for link in detail_links:
                    full_link = "http://cx.jlsjsxxw.com" + link.replace("..", "")
                    req_header["Accept"] = ("text/html,application/xhtml+xml,application/xml;q=0.9,"
                                            "image/avif,image/webp,image/apng,*/*;q=0.8,"
                                            "application/signed-exchange;v=b3;q=0.9")
                    detail = self.get_cecond_lv_page(full_link, req_header, "get")
                    if not detail:
                        break
                    record = {}
                    detail_html = etree.HTML(detail.content.decode(detail.apparent_encoding))
                    name_nodes = detail_html.xpath('//label[@id="lblCorpName1"]/text()')
                    if not name_nodes:
                        continue
                    if len(name_nodes[0]) <= 3:
                        continue
                    record["comp_name"] = name_nodes[0].strip()
                    license_nodes = detail_html.xpath('//label[@id="LicenseNum"]/text()')
                    if license_nodes:
                        record["credit_no"] = license_nodes[0].strip()
                    legal_nodes = detail_html.xpath('//label[@id="LegalMan"]/text()')
                    if legal_nodes:
                        record["oper_name"] = legal_nodes[0].strip()
                    record["detail_url"] = full_link
                    beian_province_name = "吉林省"
                    beian_type = "外省" if "SwCorpData" in page_url else "本省"
                    source_name = "吉林省建筑市场监管公共服务平台"
                    unidd = self.get_unid(re.sub(r"\s", "", record.get("comp_name", "")), re.sub(r"\s", "", record.get("credit_no", "")), re.sub(r"\s", "", record.get("oper_name", "")), beian_province_name, beian_type, source_name)
                    self.mysql_que.put((record, beian_province_name, beian_type, source_name, unidd))
                page_index += 1
                time.sleep(0.5)
                if len(detail_links) < page_size:
                    break
        self.stop_list.append(None)
        print("吉林省结束")

    # 福建省
    def run_FJS(self):
        """Crawl the Fujian construction-industry disclosure platform.

        ``outside=false`` selects in-province companies, ``outside=true``
        out-of-province ones.  pageSize is server-tunable, so a large value
        keeps the request count low.
        """
        print("福建省启动")
        pagesize = 1000
        for types in ["false", "true"]:
            # BUG FIX: the page counter was initialised once OUTSIDE this
            # loop, so the "true" pass resumed from the "false" pass's last
            # page and skipped most out-of-province records.
            pagenum = 1
            while True:
                url = "https://220.160.52.164:8813/credit/tendering/corps?pageNum={}&pageSize={}&corpName=&outside={}&industryId=&socialCreditCode=".format(pagenum, pagesize, types)
                header = {
                    "Host": "220.160.52.164:8813",
                    "Referer": "https://220.160.52.164:8813/gaia/infoPublic/index.html",
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
                }
                response = self.get_cecond_lv_page(url, header, "get")
                if not response:
                    break
                if response.text == "":
                    break
                pagenum += 1
                time.sleep(0.5)
                data_list = response.json()["data"]
                if not data_list:
                    break
                for every_data in data_list["list"]:
                    temporary = {}
                    # ``or ""`` also guards an explicit null corpName, which
                    # used to crash the len() check below with a TypeError.
                    comp_name = every_data.get("corpName") or ""
                    if len(comp_name) <= 3:
                        continue
                    temporary["comp_name"] = comp_name
                    # BUG FIX: a null socialCreditCode used to crash re.sub()
                    # in the unid call below.
                    temporary["credit_no"] = every_data.get("socialCreditCode") or ""
                    temporary["comp_addr"] = every_data.get("divisonName")
                    # qualify* fields may be null -> join an empty list instead.
                    temporary["comp_zz"] = ",".join(every_data.get("qualifyNames") or [])
                    temporary["comp_zzcode"] = "".join(every_data.get("qualifyNumbers") or [])
                    temporary["detail_url"] = "https://220.160.52.164:8813/credit/tendering/corp/{}".format(quote(temporary["comp_name"]))
                    beian_province_name = "福建省"
                    beian_type = "本省" if types == "false" else "外省"
                    source_name = "福建省建设行业信息公开平台"
                    unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")), re.sub(r"\s", "", temporary.get("credit_no", "")), re.sub(r"\s", "", temporary.get("oper_name", "")), beian_province_name, beian_type, source_name)
                    self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                if data_list["total"] < pagesize:
                    break
                if len(data_list["list"]) < pagesize:
                    break
        self.stop_list.append(None)
        print("福建省启动结束")

    # 江西省
    def run_JXS(self):
        """Crawl Jiangxi "住建云": company records, violation publications and
        out-of-province filings (three endpoints selected by ``types``).

        ``$pgsz`` is server-tunable, so one oversized page per endpoint
        normally fetches everything in a single request.
        """
        print("江西省启动")
        pagesize = 200000
        for types in ["getCompDate", "listPubViolation", "listBaPubic"]:
            pagenum = 1
            while True:
                url = "http://zjy.jxjst.gov.cn/w/dataQuery/{}".format(types)
                if types == "listBaPubic":
                    # The out-of-province endpoint takes a different form body.
                    datas = {"certType": "", "compName": "", "compCreditCode": "", "$pgsz": pagesize, "$pg": pagenum}
                else:
                    datas = {"parameter": "", "$pgsz": pagesize, "$pg": pagenum}
                header = {
                    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                    "Host": "zjy.jxjst.gov.cn",
                    "Origin": "http://zjy.jxjst.gov.cn",
                    "Referer": url,
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
                }
                response = self.get_cecond_lv_page(url, header, "post", datas)
                if not response:
                    break
                if response.text == "":
                    break
                data_list = response.json()["rows"]
                if not data_list:
                    break
                for every_data in data_list:
                    temporary = {}
                    # BUG FIX: the old chain indexed ["punishTargetName"]
                    # directly and raised KeyError when both name fields were
                    # absent; fall through safely and skip such rows instead.
                    comp_name = every_data.get("compName") or every_data.get("punishTargetName") or ""
                    if len(comp_name) <= 3:
                        continue
                    temporary["comp_name"] = comp_name
                    temporary["comp_stayname"] = every_data.get("branchFzr") or ""
                    temporary["credit_no"] = every_data.get("compCreditCode") or every_data.get("punishTargetCode", "")
                    temporary["oper_name"] = every_data.get("cmName") or every_data.get("compCeoName", "")
                    temporary["comp_type"] = every_data.get("dataValue") or every_data.get("compTypeName", "")
                    if every_data.get("compId") and every_data.get("certType"):
                        temporary["detail_url"] = "http://zjy.jxjst.gov.cn/w/dataQuery/ba/comp/view/{}/{}".format(every_data["compId"], every_data["certType"])
                    beian_province_name = "江西省"
                    beian_type = "外省" if types != "getCompDate" else "本省"
                    source_name = "江西住建云"
                    # NOTE: unlike the sibling crawlers, credit_no/oper_name are
                    # deliberately NOT whitespace-stripped here — changing that
                    # would alter the unid of already-stored rows.
                    unidd = self.get_unid(re.sub(r"\s", "", temporary["comp_name"]), temporary.get("credit_no", ""), temporary.get("oper_name", ""), beian_province_name, beian_type, source_name)
                    self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                pagenum += 1
                time.sleep(0.5)
                if len(data_list) < pagesize:
                    break
        self.stop_list.append(None)
        print("江西省启动结束")

    # 山东省
    def run_SDS(self):
        """Crawl the Shandong credit platform per certificate type.

        The endpoint answers with a JSONP wrapper that is stripped by regex
        before ``json.loads``.  The row's AreaName decides in-province vs
        out-of-province.
        """
        print("山东省开始")
        header = {
            "Accept": "text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01",
            "Content-Type": "application/x-www-form-urlencoded",
            # "Cookie": "SHIROJSESSIONID=e80ac335-4d2b-465a-8dcc-b033884dfd60",
            "Host": "221.214.94.41:81",
            "Origin": "http://221.214.94.41:81",
            "Referer": "http://221.214.94.41:81/xyzj/DTFront/ZongHeSearch/?searchType=0",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest"
        }
        type_dict = {"110": "工程勘察", "111": "工程设计", "112": "建筑业", "113": "工程监理", "118": "施工图审查", "123": "房地产开发"}
        for types in [110, 111, 112, 113, 118, 123]:
            pagenum = 1
            pagesize = 12
            while True:
                # pagesize 参数可变 默认12
                data_str = "methodname=GetCorpInfo&CorpName=&CorpCode=&CertType={}&LegalMan=&CurrPageIndex={}&PageSize={}".format(
                    types, pagenum, pagesize)
                urls = "http://221.214.94.41:81/InformationReleasing/Ashx/InformationReleasing.ashx?callback=jQuery17102513546121411592_{}".format(
                    int(time.time() * 1000))
                header["User-Agent"] = self.ua.random
                response = requests.post(url=urls, headers=header, data=data_str, verify=False)
                if not response:
                    break
                if response.text == "":
                    break
                # Strip the JSONP wrapper "jQuery...({...})" down to bare JSON.
                response_dumps = re.sub(r"jQuery.*?\(|\)$", "", response.text)
                data_ = json.loads(response_dumps).get("data")
                if not data_:
                    break
                data_list = data_.get("CorpInfoList")
                if not data_list:
                    break
                for every_data in data_list:
                    temporary = {}
                    comp_name = every_data.get("CorpName", "")
                    if not comp_name or len(comp_name) <= 3:
                        continue
                    temporary["comp_name"] = comp_name
                    credit_no = every_data.get("CorpCode", "")
                    if credit_no:
                        temporary["credit_no"] = credit_no
                    temporary["oper_name"] = every_data.get("LegalMan", "")
                    # BUG FIX: ``types`` is an int but the dict keys are
                    # strings, so the old lookup always returned "".
                    temporary["types"] = type_dict.get(str(types), "")
                    beian_province_name = "山东省"
                    # ``or ""`` guards an explicit null AreaName.
                    beian_type = "本省" if "山东" in (every_data.get("AreaName") or "") else "外省"
                    source_name = "山东省住房城乡建设服务监管与信用信息综合平台"
                    # BUG FIX: use .get — a row with an empty CorpCode used to
                    # raise KeyError here.
                    unidd = self.get_unid(temporary["comp_name"], temporary.get("credit_no", ""), temporary["oper_name"],
                                          beian_province_name, beian_type, source_name)
                    self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                pagenum += 1
                time.sleep(0.5)
                if len(data_list) < pagesize:
                    break
        self.stop_list.append(None)
        print("山东省结束")

    # 山东省
    def run_SDS_outside(self):
        """Crawl Shandong's construction credit platform for the per-unit-type
        company lists and queue every record for MySQL insertion.

        Pages through each category code until the API returns a short page,
        then appends a stop sentinel to ``self.stop_list``.
        """
        header = {
            "Accept": "text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01",
            "Content-Type": "application/x-www-form-urlencoded",
            # "Cookie": "SHIROJSESSIONID=e80ac335-4d2b-465a-8dcc-b033884dfd60",
            "Host": "221.214.94.41:81",
            "Origin": "http://221.214.94.41:81",
            "Referer": "http://221.214.94.41:81/xyzj/DTFront/ZongHeSearch/?searchType=0",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest"
        }
        # Category code -> human-readable label.  Keys are strings while the
        # loop below iterates ints, so look-ups must go through str().
        type_dict = {"23": "其他企业", "22": "房地产估价", "27": "物业服务", "42": "造价咨询", "18": "招标代理", "10": "检测单位",
                     "13": "施工单位", "14": "监理单位", "16": "设计单位", "15": "勘察单位", "30": "审图机构", "11": "建设单位"}
        for types in [23, 22, 27, 42, 18, 10, 13, 14, 16, 15, 30, 11]:
            pagenum = 1
            pagesize = 12  # PageSize parameter is adjustable; site default is 12
            while True:
                data_str = "methodname=GetCorpInfo&CorpName=&CorpCode=&CertType={}&LegalMan=&CurrPageIndex={}&PageSize={}".format(
                    types, pagenum, pagesize)
                urls = "http://221.214.94.41:81/InformationReleasing/Ashx/InformationReleasing.ashx?callback=jQuery17102513546121411592_{}".format(
                    int(time.time() * 1000))
                header["User-Agent"] = self.ua.random
                try:
                    response = requests.post(url=urls, headers=header, data=data_str, verify=False)
                except requests.RequestException:
                    # network failure: abandon this category instead of letting
                    # the exception kill the thread before the stop sentinel
                    break
                if not response:
                    break
                if response.text == "":
                    break
                # strip the JSONP wrapper "jQuery...( ... )" to get plain JSON
                response_dumps = re.sub(r"jQuery.*?\(|\)$", "", response.text)
                data_ = json.loads(response_dumps).get("data")
                if not data_:
                    break
                data_list = data_.get("CorpInfoList")
                if not data_list:
                    break
                for every_data in data_list:
                    temporary = {}
                    comp_name = every_data.get("CorpName", "")
                    if not comp_name:
                        continue
                    if len(comp_name) <= 3:
                        continue
                    temporary["comp_name"] = comp_name
                    credit_no = every_data.get("CorpCode", "")
                    if credit_no:
                        temporary["credit_no"] = credit_no
                    temporary["oper_name"] = every_data.get("LegalMan", "")
                    # fix: dict keys are strings but `types` is an int, so the
                    # original look-up always fell back to ""
                    temporary["types"] = type_dict.get(str(types), "")
                    beian_province_name = "山东省"
                    beian_type = "本省" if "山东" in every_data.get("AreaName", "") else "外省"
                    source_name = "山东省住房城乡建设服务监管与信用信息综合平台"
                    # .get: "credit_no" is absent when CorpCode was empty; the
                    # original [] access raised KeyError on such rows
                    unidd = self.get_unid(temporary["comp_name"], temporary.get("credit_no", ""), temporary["oper_name"],
                                          beian_province_name, beian_type, source_name)
                    self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                pagenum += 1
                time.sleep(0.5)
                if len(data_list) < pagesize:
                    break
        self.stop_list.append(None)


    # 河南省
    def run_HeNS(self):
        """Crawl company records from the Henan construction market public
        service platform and queue each record for MySQL insertion.

        Iterates all five company-list endpoints; records from the
        "otherProvinceList" endpoint are tagged as out-of-province ("外省").
        """
        print("河南省开始")
        pagesize = 10000  # pageSize parameter is adjustable (site default 20)
        header = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Host": "hngcjs.hnjs.henan.gov.cn",
            "Origin": "http://hngcjs.hnjs.henan.gov.cn",
            "Pragma": "no-cache",
            "Referer": "http://hngcjs.hnjs.henan.gov.cn/company/constructionInfo?searchcorpname=",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
        }
        # list: construction enterprises / professionalOperationList: specialist
        # operation enterprises / otherProvinceList: out-of-province entrants /
        # labourList: legacy labour enterprises / biddingAgencyList: bidding agencies
        for types in ["list", "professionalOperationList", "otherProvinceList", "labourList", "biddingAgencyList"]:
            url = f"http://hngcjs.hnjs.henan.gov.cn/company/{types}"
            pagenum = 1
            data_dict = {
                "pageNum": pagenum,
                "pageSize": pagesize
            }
            while True:
                data_dict["pageNum"] = pagenum
                header["Referer"] = url
                response = self.get_cecond_lv_page(url, header, "post", data_dict)
                if not response:
                    break
                if response.text == "":
                    break
                data_list = response.json()
                if not data_list:
                    break
                data_list = data_list.get("rows")
                if not data_list:
                    break
                for every_data in data_list:
                    temporary = {}
                    # Each endpoint names the company-name field differently.
                    # NOTE(review): "qiYeFaDingDaiBiaoRen" is the legal
                    # representative, not a company name — kept to preserve the
                    # original fallback order; confirm against the endpoint.
                    # The trailing "" guards len() below against a None/absent
                    # name, which previously raised TypeError.
                    comp_name = (every_data.get("enterpriseName")
                                 or every_data.get("qiYeMingCheng")
                                 or every_data.get("qiYeFaDingDaiBiaoRen")
                                 or every_data.get("corpName")
                                 or every_data.get("corpname")
                                 or "")
                    if len(comp_name) <= 3:
                        continue
                    temporary["comp_name"] = comp_name
                    # Credit code under its endpoint-specific names, normalised
                    # to "" so the re.sub below never receives None.
                    temporary["credit_no"] = (every_data.get("enterpriseCode")
                                              or every_data.get("tongYiSheHuiXinYongDaiMa")
                                              or every_data.get("corpcode")
                                              or every_data.get("creditCode")
                                              or every_data.get("corpCode")
                                              or "")
                    if every_data.get("qiYeZhuCeShuDi"):
                        temporary["comp_addr"] = every_data.get("qiYeZhuCeShuDi", "")
                    else:
                        temporary["comp_addr"] = every_data.get("address", "")
                    temporary["comp_stay_addr"] = every_data.get("nameStr", "")
                    # Legal representative, likewise normalised to "".
                    temporary["oper_name"] = (every_data.get("qiYeFaDingDaiBiaoRen")
                                              or every_data.get("legalman")
                                              or every_data.get("legalMan")
                                              or every_data.get("enterprisePersonCharge")
                                              or "")
                    beian_province_name = "河南省"
                    beian_type = "外省" if "otherProvinceList" in url else "本省"
                    source_name = "河南省建筑市场监管公共服务平台"
                    unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")),
                                          re.sub(r"\s", "", temporary.get("credit_no", "")),
                                          re.sub(r"\s", "", temporary.get("oper_name", "")),
                                          beian_province_name, beian_type, source_name)
                    self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                if len(data_list) < pagesize:
                    break
                pagenum += 1
                time.sleep(1)
        self.stop_list.append(None)
        print("河南省结束")

    # 湖北省 -- by category -- disabled (kept for reference; see run_HBS2)
    def run_HBS(self):
        """Crawl the Hubei construction market platform split by the
        in/out-of-province flag (``sfswqy``).

        Disabled variant: results are only printed, never queued, and each
        category is capped at three pages.
        """
        print("湖北省开始")
        # pagesize parameter is adjustable; site default is 10
        # sfswqy: 0 = in-province, 1 = out-of-province, 2 = "Je" company list
        for types in [0, 1, 2]:
            if types != 2:
                urls = "https://hbjz.hbcic.net.cn/hbythqy/szjs_ythpt/frame/workportal/djgcompanyinfotempaction_yth.action?cmd=getCorpInfoListLS"
            else:
                urls = "https://hbjz.hbcic.net.cn/hbythqy/szjs_ythpt/frame/workportal/djgcompanyinfotempaction_yth.action?cmd=getJeCorpInfoList"
            header = {
                "Accept": "application/json, text/javascript, */*; q=0.01",
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                "Host": "hbjz.hbcic.net.cn",
                "Origin": "http://hbjz.hbcic.net.cn",
                "Referer": urls,  # landing page
                "X-Requested-With": "XMLHttpRequest",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
            }
            data_dict = {"pageindex": "", "pagesize": "", "corpname": "", "corpcode": "", "sfswqy": types, "certtypenum": "", "danweitype": ""}
            pagenum = 0
            pagesize = 10
            data_dict["pagesize"] = pagesize
            while True:
                # fix: the page index was never written into the payload, so
                # every iteration re-fetched the same (first) page
                data_dict["pageindex"] = pagenum
                response = self.get_cecond_lv_page(urls, header, "post", data_dict)
                if not response:
                    break
                data_ = response.json().get("custom")
                if not data_:
                    break
                data_list = data_.get("list")
                if not data_list:
                    break
                for every_data in data_list:
                    temporary = {}
                    comp_name = every_data.get("danweiname", "")
                    if not comp_name:
                        continue
                    if len(comp_name) <= 3:
                        continue
                    temporary["comp_name"] = comp_name
                    credit_no = every_data.get("socialcode", "")
                    if credit_no:
                        temporary["credit_no"] = credit_no
                    temporary["oper_name"] = every_data.get("farenname", "")
                    comp_type = every_data.get("dwtypename")
                    if comp_type:
                        temporary["comp_type"] = ",".join([eve.get("dwtype") for eve in comp_type if eve.get("dwtype") and eve.get("dwtype") != "null"])
                    beian_province_name = "湖北省"
                    beian_type = "本省" if types == 0 else "外省"
                    source_name = "湖北省建筑市场监督与诚信一体化平台"
                    print(temporary, types, beian_type, pagenum, beian_province_name)
                    # .get: "credit_no" is absent when socialcode was empty;
                    # the original [] access raised KeyError on such rows
                    unidd = self.get_unid(temporary["comp_name"], temporary.get("credit_no", ""), temporary["oper_name"], beian_province_name, beian_type, source_name)
                    # self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                pagenum += 1
                if len(data_list) < pagesize:
                    break
                time.sleep(1)
                if pagenum >= 3:
                    break
        print("湖北省结束")
        self.stop_list.append(None)

    # 湖北省 -- full crawl (active variant)
    def run_HBS2(self):
        """Crawl all company records from the Hubei construction market
        supervision platform and queue each record for MySQL insertion.

        A record is tagged "外省" (out-of-province) when its city field
        mentions another province or Beijing/Tianjin/Shanghai.
        """
        print("湖北省开始")
        # pagesize parameter is adjustable; site default is 10
        # urls = "http://hbjz.hbcic.net.cn/hbythqy/szjs_ythpt/frame/workportal/djgcompanyinfotempaction_yth.action?cmd=getCorpInfoList"
        urls = "https://hbjz.hbcic.net.cn/hbythqy/szjs_ythpt/frame/workportal/djgcompanyinfotempaction_yth.action?cmd=getCorpInfoListLS"
        header = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Host": "hbjz.hbcic.net.cn",
            "Origin": "http://hbjz.hbcic.net.cn",
            "Cookie": "sid=CBE560FDFF7C44189BA994F98EAB6512",
            "Referer": urls,  # landing page
            "X-Requested-With": "XMLHttpRequest",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
        }
        data_dict = {"pageindex": "", "pagesize": "", "corpname": "", "corpcode": "", "sfswqy": "", "certtypenum": "", "danweitype": ""}
        pagenum = 0
        pagesize = 1000
        data_dict["pagesize"] = pagesize
        while True:
            data_dict["pageindex"] = pagenum
            try:
                response = requests.post(url=urls, headers=header, data=data_dict)
            except requests.RequestException:
                # transient network error: back off briefly instead of
                # spinning in a hot retry loop (the old bare except/continue
                # hammered the site forever when it was down)
                time.sleep(3)
                continue
            if not response:
                break
            data_ = response.json().get("custom")
            if not data_:
                break
            data_list = data_.get("list")
            if not data_list:
                break
            for every_data in data_list:
                temporary = {}
                comp_name = every_data.get("danweiname", "")
                if not comp_name:
                    continue
                if len(comp_name) <= 3:
                    continue
                temporary["comp_name"] = comp_name
                credit_no = every_data.get("socialcode", "")
                if credit_no:
                    temporary["credit_no"] = credit_no
                temporary["oper_name"] = every_data.get("farenname", "")
                comp_type = every_data.get("dwtypename")
                if comp_type:
                    temporary["comp_type"] = ",".join([eve.get("dwtype") for eve in comp_type if eve.get("dwtype") and eve.get("dwtype") != "null"])
                beian_province_name = "湖北省"
                beian_type = "外省" if re.search("省|北京|天津|上海", every_data.get("szdsz", "")) else "本省"
                source_name = "湖北省建筑市场监督与诚信一体化平台"
                # .get: "credit_no" is absent when socialcode was empty; the
                # original [] access raised KeyError and killed the thread
                unidd = self.get_unid(temporary["comp_name"], temporary.get("credit_no", ""), temporary["oper_name"], beian_province_name, beian_type, source_name)
                self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
            pagenum += 1
            if len(data_list) < pagesize:
                break
            time.sleep(1)
        print("湖北省结束")
        self.stop_list.append(None)

    # 资质 -- 不启用 (qualification list -- disabled)
    def run_HBS_zz(self, urls, referer_url):
        """Disabled stub for the Hubei qualification-list crawler.

        Only prepares the request target and headers; no request is issued
        and the method returns None.
        """
        endpoint = "http://hbjz.hbcic.net.cn/hbythqy/szjs_ythpt/frame/workportal/djgcompanyinfotempaction_yth.action?cmd=getQyzizhiInfoList"
        request_headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Host": "hbjz.hbcic.net.cn",
            "Origin": "http://hbjz.hbcic.net.cn",
            "Referer": "http://hbjz.hbcic.net.cn/hbythqy/szjs_ythpt/frame/workportal/enterprisedetailsforsw.html?rowguid=3db33bcc-f177-4b11-a12e-58ed4987f769&corpcode=916100007869904638&CertTypeNum=&dwtype=16",  # landing page
            "X-Requested-With": "XMLHttpRequest",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
        }
        return None

    # 广东省
    def _gds_build_post_body(self, response_html, pagenum):
        """Pull the ASP.NET hidden fields out of a Guangdong list page and
        build the url-encoded POST body used to request the next page.

        Returns None when the hidden inputs are missing so the caller can
        retry the page.
        """
        try:
            viewstate = response_html.xpath('//input[@id="__VIEWSTATE"]/@value')[0]
            generator = response_html.xpath('//input[@id="__VIEWSTATEGENERATOR"]/@value')[0]
        except IndexError:
            return None
        # NOTE: the template has only three placeholders, so the trailing
        # random.randint argument is ignored by str.format — kept so the
        # request body matches the original crawler byte-for-byte
        return "__VIEWSTATE={}&__VIEWSTATEGENERATOR={}&__EVENTTARGET=ctl00%24ContentPlaceHolder1%24AspNetPager1&__EVENTARGUMENT=&__VIEWSTATEENCRYPTED=&ctl00%24ContentPlaceHolder1%24txtOrgName=&ctl00%24ContentPlaceHolder1%24txtOrgCode=&ctl00%24ContentPlaceHolder1%24ddlCity=&ctl00%24ContentPlaceHolder1%24txtCheckCode=&ctl00%24ContentPlaceHolder1%24hidCheckCodeMark={}.6668378959952".format(
            quote(viewstate, safe=""), quote(generator, safe=""), pagenum,
            random.randint(300, 950))

    def _gds_parse_row(self, data_html, header, types):
        """Parse one result-table <tr> into a record dict.

        Rows that link to a detail page are fetched and parsed from the
        detail page; plain rows are parsed from the table cells.  Returns
        None when the row should be skipped (fetch failed, name missing or
        too short).
        """
        temporary = {}
        detail_url = data_html.xpath('./td/a/@href')
        if detail_url:
            temporary["detail_url"] = detail_url[0]
            second_response = self.get_cecond_lv_page(temporary["detail_url"], header, "get")
            if not second_response:
                return None
            second_response_html = etree.HTML(second_response.content.decode(second_response.apparent_encoding))
            comp_name = second_response_html.xpath('//div[@class="ln-title"]/text()')
            if not comp_name or len(comp_name[0]) <= 3:
                return None
            temporary["comp_name"] = comp_name[0]
            credit_no = second_response_html.xpath('//div[@id="ent-info"]/div[2]//h5/text()')
            if not credit_no:
                # some detail pages carry a stray trailing space in the id
                credit_no = second_response_html.xpath('//div[@id="ent-info "]/div[2]//h5/text()')
            if credit_no:
                temporary["credit_no"] = credit_no[0]
            # NOTE(review): these absolute //div xpaths are evaluated against
            # the list-row element's document, not the detail page — looks
            # like second_response_html was intended; kept as-is, confirm.
            regist_capi = data_html.xpath('//div[@id="ent-info"]/div[3]//h5/text()')
            if regist_capi:
                temporary["regist_capi"] = regist_capi[0]
            comp_type = data_html.xpath('//div[@id="ent-info"]/div[4]//h5/text()')
            if comp_type:
                temporary["comp_type"] = comp_type[0]
            oper_name = data_html.xpath('//div[@id="ent-info"]/div[5]//h5/text()')
            if oper_name:
                temporary["oper_name"] = oper_name[0]
            return temporary
        # plain row: all fields come from the table cells
        comp_name = data_html.xpath('./td[1]/a[@href]/text()')
        if not comp_name or len(comp_name[0]) <= 3:
            return None
        temporary["comp_name"] = comp_name[0]
        if types == "Enter":
            # in-province list: column 3 is the credit code
            credit_no = data_html.xpath("./td[3]/text()")
            if credit_no:
                temporary["credit_no"] = credit_no[0]
        else:
            # out-of-province list: column 3 is the legal representative
            oper_name = data_html.xpath("./td[3]/text()")
            if oper_name:
                temporary["oper_name"] = oper_name[0]
        comp_city = data_html.xpath("./td[2]/text()")
        if comp_city:
            temporary["comp_city"] = comp_city[0]
        regist_capi = data_html.xpath("./td[4]/text()")
        if regist_capi:
            temporary["regist_capi"] = regist_capi[0] + "万元"
        start_date = data_html.xpath("./td[5]/text()")
        if start_date:
            temporary["start_date"] = start_date[0]
        return temporary

    def run_GDS(self):
        """Crawl the Guangdong construction data platform (out-of-province
        and in-province company lists) and queue up to ~300 records per list.

        The site is a classic ASP.NET WebForms pager: page 1 is fetched with
        GET, later pages POST the __VIEWSTATE captured from the previous
        response.  The two branches of the original implementation were
        duplicated nearly verbatim; the shared work now lives in
        ``_gds_build_post_body`` and ``_gds_parse_row``.
        """
        print("广东省启动")
        for types in ["IntoGDEnter", "Enter"]:
            pagenum = 1
            numbers = 1
            datas = None  # POST body carried over from the previous page
            while True:
                header = {
                    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                    "Host": "data.gdcic.net",
                    "Referer": "https://data.gdcic.net/Dop/Open/IntoGDEnterpriseList.aspx",  # landing page
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
                }
                url = "https://data.gdcic.net/Dop/Open/{}priseList.aspx".format(types)
                if pagenum == 1:
                    response = self.get_cecond_lv_page(url, header, "get")
                else:
                    datas = datas.replace("&__EVENTARGUMENT=", f"&__EVENTARGUMENT={pagenum}")
                    header["Referer"] = "https://data.gdcic.net/Dop/Open/EnterpriseList.aspx"
                    header["Content-Type"] = "application/x-www-form-urlencoded"
                    response = self.get_cecond_lv_page(url, header, "post", datas)
                if not response:
                    break
                if response.text == "":
                    break
                response_html = etree.HTML(response.content.decode(response.apparent_encoding))
                new_datas = self._gds_build_post_body(response_html, pagenum)
                if new_datas is None:
                    # hidden fields missing: retry the same page, keeping the
                    # previous POST body (matches the original except/continue)
                    continue
                datas = new_datas
                data_list = response_html.xpath('//table[@class="data-list"]//tr')
                if not data_list:
                    break
                for data_html in data_list[1:]:  # skip the header row
                    temporary = self._gds_parse_row(data_html, header, types)
                    if temporary is None:
                        continue
                    beian_province_name = "广东省"
                    beian_type = "本省" if types == "Enter" else "外省"
                    source_name = "广东省建设行业数据开放平台"
                    unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")),
                                          re.sub(r"\s", "", temporary.get("credit_no", "")),
                                          re.sub(r"\s", "", temporary.get("oper_name", "")),
                                          beian_province_name, beian_type, source_name)
                    self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                    numbers += 1
                pagenum += 1
                if numbers >= 300:
                    break
                time.sleep(0.5)
        self.stop_list.append(None)
        print("广东省启动结束")

    # 海南省
    def _queue_hainan_details(self, data_list, header):
        """Fetch each company detail page linked from a Hainan result table
        and push the parsed record onto ``self.mysql_que``.

        :param data_list: ``<a id=...>`` elements from the list-page grid
        :param header: request headers reused for the detail-page requests
        """
        for every_html in data_list:
            temporary = {}
            detail_url = every_html.xpath('./@href')
            if not detail_url:
                continue
            temporary["detail_url"] = "http://www.hizj.net:8008/WebSite_Publish/" + detail_url[0]
            second_response = self.get_cecond_lv_page(temporary["detail_url"], header, "get")
            if not second_response:
                continue
            if second_response.text == "":
                continue
            second_response_html = etree.HTML(
                second_response.content.decode(second_response.apparent_encoding))
            comp_name = second_response_html.xpath(
                '//span[@id="ID_IntegrityMge_ucShow_lbCompanyInfoName"]/text()')
            if comp_name:
                # Names of 3 characters or fewer are treated as junk rows.
                if len(comp_name[0]) <= 3:
                    continue
                temporary["comp_name"] = comp_name[0]
            credit_no = second_response_html.xpath(
                '//span[@id="ID_IntegrityMge_ucShow_txtOrganization"]/text()')
            if credit_no:
                temporary["credit_no"] = credit_no[0]
            oper_name = second_response_html.xpath(
                '//span[@id="ID_IntegrityMge_ucShow_txtLegalPerson"]/text()')
            if oper_name:
                temporary["oper_name"] = oper_name[0]
            comp_addr = second_response_html.xpath(
                '//span[@id="ID_IntegrityMge_ucShow_lbProvince"]/text()')
            if comp_addr:
                temporary["comp_addr"] = comp_addr[0]
            beian_province_name = "海南省"
            # Registered address outside Hainan => out-of-province record.
            beian_type = "外省" if temporary.get("comp_addr") != "海南省" else "本省"
            source_name = "海南省建筑市场信息公开平台"
            unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")),
                                  re.sub(r"\s", "", temporary.get("credit_no", "")),
                                  re.sub(r"\s", "", temporary.get("oper_name", "")),
                                  beian_province_name, beian_type, source_name)
            self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))

    def run_HaiNS(self):
        """Crawl the Hainan construction-market platform (ASP.NET WebForms).

        For each qualification type, page 1 is fetched with a plain GET;
        subsequent pages are "next"-button postbacks that must echo back the
        ``__VIEWSTATE`` / ``__VIEWSTATEGENERATOR`` values captured from the
        previous response.  Detail parsing is delegated to
        ``_queue_hainan_details``.  Appends ``None`` to ``self.stop_list``
        when the crawl finishes.
        """
        print("海南省启动")
        type_list = ["建筑业企业资质", "工程监理企业资质", "工程勘察企业资质", "工程设计企业资质", "工程招标代理机构资质", "造价咨询企业资质", "检测机构资质", "预拌混凝土企业",
                     "起重机械检测机构"]
        datas_dict = {}
        for types in type_list:
            # Postback body template: the three slots are __VIEWSTATE,
            # __VIEWSTATEGENERATOR and the current page number.
            datas = "__EVENTTARGET=ID_IntegrityMge_ucCreditCompanyInfoList%24ucPager1%24btnNext&__EVENTARGUMENT=&__VIEWSTATE={}&__VIEWSTATEGENERATOR={}&ID_IntegrityMge_ucCreditCompanyInfoList%24txtProjectName=&ID_IntegrityMge_ucCreditCompanyInfoList%24ddlProvince=%E5%85%A8%E9%83%A8&ID_IntegrityMge_ucCreditCompanyInfoList%24txtValidCode=&ID_IntegrityMge_ucCreditCompanyInfoList%24ucPager1%24txtCurrPage={}"
            header = {
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                "Host": "www.hizj.net:8008",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
            }
            pagenum = 1
            while True:
                urls = "http://www.hizj.net:8008/WebSite_Publish/Default.aspx?action=IntegrityMge/ucCreditCompanyInfoList&Type={}".format(
                    types)
                if pagenum == 1:
                    response = self.get_cecond_lv_page(urls, header, "get")
                    if not response:
                        break
                    if response.text == "":
                        break
                    response_html = etree.HTML(response.content.decode(response.apparent_encoding))
                    try:
                        datas_dict["__VIEWSTATE"] = response_html.xpath('//input[@id="__VIEWSTATE"]/@value')[0]
                        datas_dict["__VIEWSTATEGENERATOR"] = \
                            response_html.xpath('//input[@id="__VIEWSTATEGENERATOR"]/@value')[0]
                    except Exception:
                        # View-state fields missing -- retry the same page.
                        continue
                    data_list = response_html.xpath(
                        '//table[@id="ID_IntegrityMge_ucCreditCompanyInfoList_gridView"]//a[@id]')
                    self._queue_hainan_details(data_list, header)
                else:
                    # BUG FIX: the original format string had no "{}" slot, so
                    # the Type query parameter was silently dropped for every
                    # page after the first.
                    url_two = "http://www.hizj.net:8008/WebSite_Publish/Default.aspx?action=IntegrityMge%2fucCreditCompanyInfoList&Type={}".format(
                        self.str_change_pwd(types))
                    datas_copy = datas.format(quote(datas_dict["__VIEWSTATE"], safe=""),
                                              quote(datas_dict["__VIEWSTATEGENERATOR"], safe=""), pagenum - 1)
                    header[
                        "Referer"] = "http://www.hizj.net:8008/WebSite_Publish/Default.aspx?action=IntegrityMge%2fucCreditCompanyInfoList" + f"&Type={self.str_change_pwd(types)}"
                    header["Content-Type"] = "application/x-www-form-urlencoded"
                    header["Origin"] = "http://www.hizj.net:8008"
                    # NOTE(review): hard-coded session cookie copied from a
                    # browser session -- likely to expire; confirm it is needed.
                    header[
                        "Cookie"] = "ASP.NET_SessionId=n15mw155t2kglu55ldgwik45; ASP.NET_SessionId_NS_Sig=oenCV6mdxz8l5lG_"
                    response = self.get_cecond_lv_page(url_two, header, "post", datas_copy)
                    if not response:
                        break
                    if response.text == "":
                        break
                    response_html = etree.HTML(response.content.decode(response.apparent_encoding))
                    try:
                        datas_dict["__VIEWSTATE"] = response_html.xpath('//input[@id="__VIEWSTATE"]/@value')[0]
                        datas_dict["__VIEWSTATEGENERATOR"] = \
                            response_html.xpath('//input[@id="__VIEWSTATEGENERATOR"]/@value')[0]
                    except Exception:
                        continue
                    data_list = response_html.xpath(
                        '//table[@id="ID_IntegrityMge_ucCreditCompanyInfoList_gridView"]//a[@id]')
                    self._queue_hainan_details(data_list, header)
                pagenum += 1
                # Fewer than a full page (15 rows) means this was the last page.
                if len(data_list) < 15:
                    break
        self.stop_list.append(None)
        print("海南省结束")

    # 重庆市 --只获取列表
    def run_CQS(self):
        """Crawl the Chongqing construction-engineering site's company list
        JSON API and queue each record for MySQL insertion.

        Iterates two endpoints: out-of-province companies reporting into
        Chongqing, then local companies.  Only list-level fields are
        captured (no legal representative here).  Appends ``None`` to
        ``self.stop_list`` when finished.
        """
        print("重庆市开始")
        pagesize = 20
        # 省外 施工企业  外地施工入渝信息报送企业
        for types in ["RYEntInfo/GetRYEntDataList", "LocalEntAndQualiInfo/GetEntDataList"]:  #
            pagenum = 1
            while True:
                urls = "http://183.66.171.75:8888/" + types
                # Timestamp token with a fixed offset copied from the site's
                # own requests -- presumably an anti-cache/replay value; keep.
                ts = int(time.time()) + 154396645
                header = {
                    "Accept": "application/json, text/javascript, */*; q=0.01",
                    "Content-Type": "application/json",
                    "Host": "183.66.171.75:8888",
                    "Origin": "http://183.66.171.75:8888",
                    "Referer": "http://183.66.171.75:8888/RYEntInfo/RYEntView",
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
                }
                datas_dict = {
                    "ClickText": "验证成功",
                    "FEnterpriseName": "",
                    "currentPage": pagenum,
                    "pageSize": pagesize,
                    "sort": "xh",
                    "sortOrder": "asc",
                    "ts": ts
                }
                response = self.get_cecond_lv_page(urls, header, "post", json.dumps(datas_dict))
                if not response:
                    break
                if response.text == "":
                    break
                try:
                    data_list = response.json()["data"]
                except (ValueError, KeyError):
                    # Non-JSON body or unexpected shape -- give up on this type.
                    break
                if not data_list:
                    break
                for every_data in data_list:
                    temporary = {}
                    # BUG FIX: "企业名称" may be missing/None; the original
                    # called len(None) and killed the crawl thread.
                    comp_name = every_data.get("企业名称") or ""
                    if len(comp_name) <= 3:
                        continue
                    temporary["comp_name"] = comp_name
                    credit_no = every_data.get("营业执照注册号", "")
                    if credit_no:
                        temporary["credit_no"] = credit_no
                    comp_addr = every_data.get("注册省市", "")
                    if comp_addr:
                        temporary["comp_addr"] = comp_addr
                    regist_capi = every_data.get("注册资本金币种", "")
                    regist_capi2 = every_data.get("注册资本金", "")
                    if regist_capi and regist_capi2:
                        # Amount followed by its currency unit.
                        temporary["regist_capi"] = regist_capi2 + regist_capi
                    econ_kind = every_data.get("经济性质", "")
                    if econ_kind:
                        temporary["econ_kind"] = econ_kind
                    # The endpoint path doubles as the company-type tag.
                    temporary["comp_type"] = types
                    beian_province_name = "重庆市"
                    beian_type = "外省" if types not in ["LocalEntAndQualiInfo/GetEntDataList", "TestEntAndQuali"] else "本省"
                    source_name = "重庆建设工程信息网"
                    unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")),
                                          re.sub(r"\s", "", temporary.get("credit_no", "")),
                                          re.sub(r"\s", "", temporary.get("oper_name", "")),
                                          beian_province_name, beian_type, source_name)
                    self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                pagenum += 1
                time.sleep(0.5)
                # A short page means the last page was reached.
                if len(data_list) < pagesize:
                    break
        self.stop_list.append(None)
        print("重庆市结束")

    # 重庆市 -- 详情页获取信用代码法人
    def run_CQS_copy(self):
        """Crawl the Chongqing list API, then hit the per-company detail API
        to pick up the credit code and legal representative.

        Mirrors the list-only crawler but requests one oversized page
        (pageSize=100000) and issues an extra POST per company.  Records go
        to ``self.mysql_que``; appends ``None`` to ``self.stop_list`` on exit.
        """
        print("重庆市开始")
        # One oversized page so the whole list arrives in a single response.
        pagesize = 100000
        # Image endpoint requested before each detail POST -- presumably it
        # primes the server-side session; TODO confirm it is required.
        url_image = "http://183.66.171.75:8888/LocalEntAndQualiInfo/GetImage"
        for types in ["RYEntInfo/GetRYEntDataList", "LocalEntAndQualiInfo/GetEntDataList"]:
            pagenum = 1
            while True:
                urls = "http://183.66.171.75:8888/" + types
                # Timestamp token with a fixed offset copied from the site's
                # own requests; keep as-is.
                ts = int(time.time()) + 154396645
                header = {
                    "Accept": "application/json, text/javascript, */*; q=0.01",
                    "Content-Type": "application/json",
                    "Host": "183.66.171.75:8888",
                    "Origin": "http://183.66.171.75:8888",
                    "Referer": "http://183.66.171.75:8888/RYEntInfo/RYEntView",
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
                }
                datas_dict = {
                    "ClickText": "验证成功",
                    "FEnterpriseName": "",
                    "currentPage": pagenum,
                    "pageSize": pagesize,
                    "sort": "xh",
                    "sortOrder": "asc",
                    "ts": ts
                }
                datas_image = {"ts": ts}
                response = self.get_cecond_lv_page(urls, header, "post", json.dumps(datas_dict))
                if not response:
                    break
                if response.text == "":
                    break
                try:
                    data_list = response.json()["data"]
                except (ValueError, KeyError):
                    # Non-JSON body or unexpected shape -- give up on this type.
                    break
                if not data_list:
                    break
                for every_data in data_list:
                    temporary = {}
                    # BUG FIX: "企业名称" may be missing/None; the original
                    # called len(None) and raised TypeError.
                    comp_name = every_data.get("企业名称") or ""
                    if len(comp_name) <= 3:
                        continue
                    temporary["comp_name"] = comp_name
                    if types == "RYEntInfo/GetRYEntDataList":
                        second_url = "http://183.66.171.75:8888/RYEntInfo/GetRYEntAndQualiInfo"
                    else:
                        second_url = "http://183.66.171.75:8888/LocalEntAndQualiInfo/GetEntAndQualiInfo"
                    second_data = {"strEntName": temporary["comp_name"], "ts": ts, "ClickText": ""}
                    header["Content-Type"] = "application/x-www-form-urlencoded; charset=UTF-8"
                    # NOTE(review): hard-coded session cookie from a browser
                    # session -- likely to expire; confirm it is still valid.
                    header["Cookie"] = "ASP.NET_SessionId=3s023etacj0pilkfqv4tjzjs"
                    header["X-Requested-With"] = "XMLHttpRequest"
                    self.get_cecond_lv_page(url_image, header, "post", datas_image)
                    time.sleep(1)
                    # BUG FIX: the bare requests.post could raise (timeout,
                    # connection reset) and kill the whole crawl thread.
                    try:
                        second_resu = requests.post(url=second_url, headers=header, data=second_data)
                    except Exception:
                        continue
                    if not second_resu:
                        continue
                    if second_resu.text == "":
                        continue
                    try:
                        second_resu_json = second_resu.json()
                    except ValueError:
                        # HTML error page instead of JSON -- skip this company.
                        continue
                    if not second_resu_json:
                        continue
                    data_detail = second_resu_json.get("RYEntInfo")
                    if not data_detail:
                        continue
                    credit_no = data_detail.get("营业执照注册号", "")
                    if credit_no:
                        temporary["credit_no"] = credit_no
                    oper_name = data_detail.get("法定代表人", "")
                    if oper_name:
                        temporary["oper_name"] = oper_name
                    comp_addr = data_detail.get("注册省市", "")
                    if comp_addr:
                        temporary["comp_addr"] = comp_addr
                    regist_capi = data_detail.get("注册资本金币种", "")
                    if regist_capi:
                        temporary["regist_capi"] = regist_capi
                    econ_kind = data_detail.get("经济性质", "")
                    if econ_kind:
                        temporary["econ_kind"] = econ_kind
                    temporary["comp_type"] = types
                    beian_province_name = "重庆市"
                    beian_type = "外省" if types not in ["LocalEntAndQualiInfo/GetEntDataList", "TestEntAndQuali"] else "本省"
                    source_name = "重庆建设工程信息网"
                    unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")),
                                          re.sub(r"\s", "", temporary.get("credit_no", "")),
                                          re.sub(r"\s", "", temporary.get("oper_name", "")),
                                          beian_province_name, beian_type, source_name)
                    self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                pagenum += 1
                time.sleep(0.5)
                if len(data_list) < pagesize:
                    break
        self.stop_list.append(None)
        print("重庆市结束")

    # 重庆市
    def _queue_cqs2_detail(self, third_url, referer, types):
        """Fetch one legacy Chongqing company detail page, parse it and push
        the record onto ``self.mysql_que``.

        :param third_url: absolute URL of the company detail page
        :param referer: list-page URL used as the Referer header
        :param types: list identifier, used to classify 本省/外省
        """
        third_header = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Host": "183.66.171.75:88",
            "Referer": referer,
            "Upgrade-Insecure-Requests": "1"
        }
        third_response = self.get_cecond_lv_page(third_url, third_header, "get")
        if not third_response:
            return
        try:
            third_response_html = etree.HTML(third_response.content.decode(third_response.apparent_encoding))
        except Exception:
            # apparent_encoding occasionally mis-detects; fall back to GBK
            # for gb2312 pages, utf-8 otherwise.
            if third_response.apparent_encoding.lower() == "gb2312":
                third_response_html = etree.HTML(third_response.content.decode("GBK"))
            else:
                third_response_html = etree.HTML(third_response.content.decode("utf-8"))
        temporary = {}
        comp_name = third_response_html.xpath('//span[@id="FName"]/text()')
        if not comp_name:
            return
        # Names of 3 characters or fewer are treated as junk rows.
        if len(comp_name[0]) <= 3:
            return
        temporary["comp_name"] = comp_name[0]
        credit_no = third_response_html.xpath('//span[@id="FLicence"]/text()')
        if credit_no:
            temporary["credit_no"] = credit_no[0]
        comp_addr = third_response_html.xpath('//span[@id="FLoginaddress"]/text()')
        if comp_addr:
            temporary["comp_addr"] = comp_addr[0]
        regist_capi = third_response_html.xpath('//span[@id="FValue"]/text()')
        if regist_capi:
            temporary["regist_capi"] = regist_capi[0]
        econ_kind = third_response_html.xpath('//span[@id="FType"]/text()')
        if econ_kind:
            temporary["econ_kind"] = econ_kind[0]
        beian_province_name = "重庆市"
        beian_type = "外省" if types in ["Jlqy/WdJlqy_List"] else "本省"
        source_name = "重庆建设工程信息网"
        unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")),
                              re.sub(r"\s", "", temporary.get("credit_no", "")),
                              re.sub(r"\s", "", temporary.get("oper_name", "")),
                              beian_province_name, beian_type, source_name)
        self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))

    def run_CQS2(self):
        """Crawl the legacy Chongqing company list (ASP.NET WebForms).

        Each grid row fires a ``__doPostBack`` whose response is a javascript
        ``document.location.href`` redirect to the company detail page;
        the detail page is then fetched and parsed by
        ``_queue_cqs2_detail``.  Pagination is a second postback driven by
        the ``__EVENTTARGET`` captured from page 1.  Appends ``None`` to
        ``self.stop_list`` when done.
        """
        # "Sgqy/Sgqy_List": 旧版劳务企业 (legacy construction-company list).
        for types in ["Sgqy/Sgqy_List"]:
            header = {
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                "Host": "183.66.171.75:88",
                "Referer": "http://183.66.171.75:88/CQCollect/Qy_Query/Jlqy/WdJlqy_List.aspx"
            }
            data_dict = {}
            pagenum = 1
            pagesize = 20
            while True:
                urls = "http://183.66.171.75:88/CQCollect/Qy_Query/{}.aspx".format(types)
                if pagenum == 1:
                    response = self.get_cecond_lv_page(urls, header, "get")
                    if not response:
                        break
                    response_html = etree.HTML(response.content.decode(response.apparent_encoding))
                    data_list = response_html.xpath('//table[@id="DataGrid1"]//tr/td[2]//a/@href')
                    try:
                        data_dict["__VIEWSTATE"] = response_html.xpath('//input[@name="__VIEWSTATE"]/@value')[0]
                        EVENTTARGET = response_html.xpath('//a[@id][3]/@href')[0]
                        data_dict["__EVENTTARGET"] = re.search(r"__doPostBack\('(.*?)','", EVENTTARGET).groups()[0].replace("$", ":")
                    except Exception:
                        # View state or pager link missing -- retry this page.
                        continue
                    if not data_list:
                        break
                    for date_html in data_list:
                        date_url = re.findall(r"'(.*?)'", date_html)
                        if len(date_url) != 2:
                            continue
                        data_str = "__EVENTTARGET={}&__EVENTARGUMENT={}&__VIEWSTATE={}&FName=&Pager1%3ANewPage=".format(
                            quote(date_url[0].replace("$", ":"), safe=""), quote(date_url[1], safe=""),
                            quote(data_dict["__VIEWSTATE"], safe=""))
                        header["Origin"] = "http://183.66.171.75:88"
                        header["Content-Type"] = "application/x-www-form-urlencoded"
                        header["Referer"] = urls
                        second_repose = self.get_cecond_lv_page(urls, header, "post", data_str)
                        if not second_repose:
                            continue
                        link_url = re.search(r"document\.location\.href\s*=(?:'|\')(.*?)(?:'|\');</script>", second_repose.text)
                        if not link_url:
                            continue
                        third_url = "http://183.66.171.75:88/CQCollect/Qy_Query/" + types.split("/")[0] + "/" + link_url.groups()[0]
                        self._queue_cqs2_detail(third_url, urls, types)
                else:
                    second_data_str = "__EVENTTARGET={}&__EVENTARGUMENT={}&__VIEWSTATE={}&FName=&Pager1%3ANewPage=".format(
                        quote(data_dict["__EVENTTARGET"], safe=""), "", quote(data_dict["__VIEWSTATE"], safe=""))
                    second_response = self.get_cecond_lv_page(urls, header, "post", second_data_str)
                    if not second_response:
                        break
                    second_response_html = etree.HTML(second_response.content.decode(second_response.apparent_encoding))
                    data_list = second_response_html.xpath('//table[@id="DataGrid1"]//tr/td[2]//a/@href')
                    try:
                        data_dict["__VIEWSTATE"] = second_response_html.xpath('//input[@name="__VIEWSTATE"]/@value')[0]
                    except Exception:
                        continue
                    if not data_list:
                        break
                    for date_html in data_list:
                        date_url = re.findall(r"'(.*?)'", date_html)
                        if len(date_url) != 2:
                            continue
                        data_str = "__EVENTTARGET={}&__EVENTARGUMENT={}&__VIEWSTATE={}&FName=&Pager1%3ANewPage=".format(
                            quote(date_url[0].replace("$", ":"), safe=""), quote(date_url[1], safe=""),
                            quote(data_dict["__VIEWSTATE"], safe=""))
                        header["Origin"] = "http://183.66.171.75:88"
                        header["Content-Type"] = "application/x-www-form-urlencoded"
                        header["Referer"] = urls
                        second_repose = self.get_cecond_lv_page(urls, header, "post", data_str)
                        # BUG FIX: the original dereferenced .text without
                        # checking for a failed request (None), which crashed
                        # the thread with AttributeError.
                        if not second_repose:
                            continue
                        link_url = re.search(r"document\.location\.href\s*='(.*?)';<", second_repose.text)
                        if not link_url:
                            continue
                        third_url = "http://183.66.171.75:88/CQCollect/Qy_Query/" + types.split("/")[0] + "/" + link_url.groups()[0]
                        self._queue_cqs2_detail(third_url, urls, types)
                pagenum += 1
                time.sleep(0.5)
                # A short page means the last page was reached.
                if len(data_list) < pagesize:
                    break
        self.stop_list.append(None)

    #重庆市
    def _queue_cqs3_rows(self, data_list, types):
        """Queue company names parsed from one page of a Chongqing
        supervision / cost-consulting list table.

        :param data_list: ``<tr>`` elements of the data grid (header rows
            excluded by the caller's xpath)
        :param types: list identifier, used to classify 本省/外省
        """
        for data_html in data_list:
            temporary = {}
            comp_name = data_html.xpath('./td[2]/font/text()')
            if not comp_name:
                # Some rows wrap the name in an extra <a><font> pair.
                comp_name = data_html.xpath('./td[2]/font/a/font/text()')
                if not comp_name:
                    continue
            # Names of 3 characters or fewer are treated as junk rows.
            if len(comp_name[0]) <= 3:
                continue
            temporary["comp_name"] = comp_name[0]
            beian_province_name = "重庆市"
            beian_type = "外省" if types in ["Jlqy/WdJlqy_List"] else "本省"
            source_name = "重庆建设工程信息网"
            unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")),
                                  re.sub(r"\s", "", temporary.get("credit_no", "")),
                                  re.sub(r"\s", "", temporary.get("oper_name", "")),
                                  beian_province_name, beian_type, source_name)
            self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))

    def run_CQS3(self):
        """Crawl the Chongqing supervision, out-of-town supervision and cost
        consulting lists (ASP.NET WebForms postback pagination).

        Only the company name is available at list level; rows are queued by
        ``_queue_cqs3_rows``.  Appends ``None`` to ``self.stop_list`` on exit.
        """
        header = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Host": "183.66.171.75:88",
            "Referer": "http://183.66.171.75:88/CQCollect/Qy_Query/Jlqy/WdJlqy_List.aspx"
        }
        page_size = 20
        for types in ["Jlqy/Jlqy_List", "Jlqy/WdJlqy_List", "Zjzxjg/Zjzxjg_List"]:  #
            pagenum = 1
            data_dict = {}
            urls = "http://183.66.171.75:88/CQCollect/Qy_Query/{}.aspx".format(types)
            while True:
                if pagenum == 1:
                    response = self.get_cecond_lv_page(urls, header, "get")
                    if not response:
                        break
                    response_html = etree.HTML(response.content.decode(response.apparent_encoding))
                    data_list = response_html.xpath('//table[@id="DataGrid1"]/tr[not(@align="Center")]')
                    try:
                        data_dict["__VIEWSTATE"] = response_html.xpath('//input[@name="__VIEWSTATE"]/@value')[0]
                        EVENTTARGET = response_html.xpath('//a[@id][3]/@href')[0]
                        data_dict["__EVENTTARGET"] = re.search(r"__doPostBack\('(.*?)','", EVENTTARGET).groups()[0].replace("$", ":")
                    except Exception:
                        # Pager metadata missing -- retry the same page.
                        continue
                    if not data_list:
                        break
                    self._queue_cqs3_rows(data_list, types)
                else:
                    header["Referer"] = urls
                    header["Origin"] = "http://183.66.171.75:88"
                    header["Content-Type"] = "application/x-www-form-urlencoded"
                    data_str = "__EVENTTARGET={}&__EVENTARGUMENT=&__VIEWSTATE={}&txt_EnpName=&TurnPage1%3APageNum=".format(
                        quote(data_dict["__EVENTTARGET"], safe=""), quote(data_dict["__VIEWSTATE"], safe=""))
                    response = self.get_cecond_lv_page(urls, header, "post", data_str)
                    if not response:
                        break
                    try:
                        response_html = etree.HTML(response.content.decode(response.apparent_encoding))
                    except Exception:
                        # apparent_encoding occasionally mis-detects; fall back
                        # to GBK for gb2312 pages, utf-8 otherwise.
                        if response.apparent_encoding.lower() == "gb2312":
                            response_html = etree.HTML(response.content.decode("GBK"))
                        else:
                            response_html = etree.HTML(response.content.decode("utf-8"))
                    data_list = response_html.xpath('//table[@id="DataGrid1"]/tr[not(@align="Center")]')
                    try:
                        data_dict["__VIEWSTATE"] = response_html.xpath('//input[@name="__VIEWSTATE"]/@value')[0]
                        EVENTTARGET = response_html.xpath('//a[@id][3]/@href')[0]
                        data_dict["__EVENTTARGET"] = re.search(r"__doPostBack\('(.*?)','", EVENTTARGET).groups()[0].replace("$", ":")
                    except Exception:
                        continue
                    if not data_list:
                        break
                    self._queue_cqs3_rows(data_list, types)
                pagenum += 1
                time.sleep(0.5)
                # A short page means the last page was reached.
                if len(data_list) < page_size:
                    break
        self.stop_list.append(None)

    # Yunnan Province
    def run_YNS(self):
        """Crawl enterprise listings from 云南省建筑市场监管与诚信信息网.

        Iterates every ``entType`` code ('' = all, jzy 建筑业企业, jj 工程监理企业,
        zljc 质量检测机构, zbdl 招标代理机构, yllh 园林绿化企业, kcsj 勘察设计企业,
        rddj 入滇企业), pages through the JSON API, and pushes each record onto
        ``self.mysql_que``. Appends a ``None`` sentinel to ``self.stop_list``
        when finished.
        """
        print("云南省启动")
        page_size = 20
        type_list = ["", "jzy", "jj", "zljc", "zbdl", "yllh", "kcsj", "rddj"]
        for types in type_list:
            pagenum = 1
            while True:
                # millisecond timestamp the site expects on every request
                times = int(time.time() * 1000) - 1
                header = {
                    "Accept": "application/json, text/javascript, */*; q=0.01",
                    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                    "Host": "www.ynjzjgcx.com",
                    "Origin": "https://www.ynjzjgcx.com",
                    "Referer": "https://www.ynjzjgcx.com/webHtml/ent/index.html?n=data&t=%s" % (times - 10),
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
                }
                url = "https://www.ynjzjgcx.com/web/ent/getEnterpriseList"
                datas_dict = {
                    "pageNo": pagenum,
                    "pageSize": page_size,
                    "entName": "",
                    "entType": types,
                    "entPossession": "",
                    "code": "",
                    "captchaVerification": "",
                    "t": times
                }
                response = self.get_cecond_lv_page(url, header, "post", datas_dict)
                if not response:
                    break
                if response.text == "":
                    break
                try:
                    data_list = response.json()["list"]
                except:
                    time.sleep(1)
                    # an explicit error message means we are blocked: give up on this type
                    if response.json().get("message"):
                        break
                    continue
                if not data_list:
                    break
                for every_data in data_list:
                    temporary = {}
                    comp_name = every_data.get("name")
                    # BUGFIX: guard against a missing name — the original called
                    # len(None) and crashed the worker on such rows
                    if not comp_name or len(comp_name) <= 3:
                        continue
                    temporary["comp_name"] = comp_name
                    temporary["credit_no"] = every_data.get("creditCode")
                    temporary["comp_addr"] = every_data.get("countyName", "")
                    temporary["comp_type"] = every_data.get("regType", "")
                    beian_province_name = "云南省"
                    # NOTE(review): 入滇 (rddj) rows are tagged 本省 and every other
                    # type 外省 — this mapping looks inverted; confirm intent
                    beian_type = "外省" if types != "rddj" else "本省"
                    source_name = "云南省建筑市场监管与诚信信息网"
                    # `or ""` shields re.sub from a null creditCode in the payload
                    unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")), re.sub(r"\s", "", temporary.get("credit_no") or ""), re.sub(r"\s", "", temporary.get("oper_name") or ""), beian_province_name, beian_type, source_name)
                    self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                pagenum += 1
                time.sleep(0.5)
                # a short page means the listing is exhausted
                if len(data_list) < page_size:
                    break
        self.stop_list.append(None)
        print("云南省结束")

    # Tibet Autonomous Region
    def run_XZZZQU(self):
        """Crawl enterprise records from 西藏自治区建筑市场监管与诚信信息平台.

        Fetches the table in huge pages (PageSize is accepted server-side;
        the site default is 30) and enqueues each record onto
        ``self.mysql_que``; rows flagged ``isquwai == 1`` are tagged 外省,
        everything else 本省.
        """
        print("西藏自治区开始")
        pagenum = 1
        pagesize = 200000  # server honours an oversized PageSize; default is 30
        while True:
            header = {
                "Accept": "*/*",
                "Accept-Encoding": "gzip, deflate",
                "Content-Type": "application/x-www-form-urlencoded",
                "Host": "jzsc.zjt.xizang.gov.cn:8012",
                "Origin": "http://jzsc.zjt.xizang.gov.cn:8012",
                "Pragma": "no-cache",
                "Referer": "http://jzsc.zjt.xizang.gov.cn:8012/CorpInfo.html",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
            }
            url = "http://jzsc.zjt.xizang.gov.cn:8012/Ajax/DataHandler.ashx?method=QueryCorpTable"
            datas_dict = {"CorpName": None, "CertTypeName": None, "PageIndex": f"{pagenum}", "PageSize": f"{pagesize}"}
            response = self.get_cecond_lv_page(url, header, "post", json.dumps(datas_dict))
            if not response:
                break
            if response.text == "":
                break
            pagenum += 1
            time.sleep(0.5)
            data_list = response.json().get("data")
            if not data_list:
                break
            for every_data in data_list:
                temporary = {}
                comp_name = every_data.get("corpname")
                # BUGFIX: the original called len(None) when 'corpname' was missing
                if not comp_name or len(comp_name) <= 3:
                    continue
                temporary["comp_name"] = comp_name
                temporary["credit_no"] = every_data.get("corpcode")
                temporary["comp_addr"] = every_data.get("address", "")
                temporary["comp_type"] = every_data.get("CertTypeName", "")
                temporary["oper_name"] = every_data.get("legalman")
                temporary["econ_kind"] = every_data.get("ECONTYPENAME")
                beian_province_name = "西藏自治区"
                # BUGFIX: .get() instead of [] so a row without 'isquwai'
                # cannot kill the worker with a KeyError
                beian_type = "外省" if every_data.get("isquwai") == 1 else "本省"
                source_name = "西藏自治区建筑市场监管与诚信信息平台"
                # `or ""` shields re.sub from null corpcode / legalman values
                unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")), re.sub(r"\s", "", temporary.get("credit_no") or ""), re.sub(r"\s", "", temporary.get("oper_name") or ""), beian_province_name, beian_type, source_name)
                self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
            if len(data_list) < pagesize:
                break
        self.stop_list.append(None)
        print("西藏自治区结束")

    # Shaanxi Province
    def run_ShanXS(self):
        """Crawl 外省入陕 enterprise registrations from js.shaanxi.gov.cn.

        ``ddl_SystemId`` selects the list: 180 入陕施工企业, 325 入陕监理企业,
        320 入陕招标企业. The site is ASP.NET WebForms, so every pager POST
        must echo the __VIEWSTATE / __VIEWSTATEGENERATOR / __EVENTVALIDATION
        hidden fields scraped from the previous response
        (see _sxs_refresh_state). Row scraping is shared by _sxs_parse_rows.
        """
        print("陕西省")
        for types in [180, 325, 320]:
            # %s is filled now with the list id; the {} slots are filled per
            # page with the freshly scraped WebForms state
            data_str = "__VIEWSTATE={}&__VIEWSTATEGENERATOR={}&__EVENTTARGET=Pager1&__EVENTARGUMENT=1&__EVENTVALIDATION={}&txtFName=&txtFCertiNo=&ddl_SystemId=%s&Pager1_input=1" % (
                types)
            pagenum = 1
            while True:
                header = {
                    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                    "Accept-Encoding": "gzip, deflate, br",
                    "Host": "js.shaanxi.gov.cn:9010",
                    "Pragma": "no-cache",
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
                }
                url = "https://js.shaanxi.gov.cn:9010/SxApp/Share/Web/RSSgqyList.aspx"
                if pagenum == 1:
                    if types == 180:
                        response = self.get_cecond_lv_page(url, header, "get")
                    else:  # the non-construction lists need an explicit query POST
                        # BUGFIX: the original passed the form body where the HTTP
                        # method argument belongs (data_copy, "get"), left the {}
                        # state slots unfilled, and omitted the '&' before btnQuery
                        data_copy = data_str.format("", "", "") + "&btnQuery=查询"
                        response = self.get_cecond_lv_page(url, header, "post", data_copy)
                else:
                    header["Referer"] = "https://js.shaanxi.gov.cn:9010/SxApp/Share/Web/RSSgqyList.aspx"
                    header["Content-Type"] = "application/x-www-form-urlencoded"
                    header["Origin"] = "https://js.shaanxi.gov.cn:9010"
                    # point the WebForms pager at the requested page
                    data_copy = re.sub(r"__EVENTARGUMENT=\d+", f"__EVENTARGUMENT={pagenum}", data_copy)
                    data_copy = re.sub(r"Pager1_input=\d+", f"Pager1_input={pagenum - 1}", data_copy)
                    response = self.get_cecond_lv_page(url, header, "post", data_copy)
                if not response:
                    break
                if response.text == "":
                    break
                response_html = etree.HTML(response.content.decode(response.apparent_encoding))
                data_copy_next = self._sxs_refresh_state(response_html, data_str)
                if data_copy_next is None:
                    # hidden state fields missing — retry the same page
                    continue
                data_copy = data_copy_next
                data_list = response_html.xpath('//div[@class="reg_mid"]//tr[@class="m_dg1_i"]')
                if pagenum > 1 and not data_list:
                    break
                self._sxs_parse_rows(data_list, header)
                # a short page (or page 1 followed by an empty page 2) ends the list
                if pagenum > 1 and len(data_list) < 20:
                    break
                pagenum += 1
                time.sleep(0.5)
        self.stop_list.append(None)
        print("陕西省结束")

    def _sxs_refresh_state(self, response_html, data_str):
        """Return *data_str* with the page's WebForms hidden fields filled in.

        Returns None when any of the three hidden inputs is missing, in which
        case the caller should retry the request.
        """
        try:
            viewstate = response_html.xpath('//input[@id="__VIEWSTATE"]/@value')[0]
            generator = response_html.xpath('//input[@id="__VIEWSTATEGENERATOR"]/@value')[0]
            validation = response_html.xpath('//input[@id="__EVENTVALIDATION"]/@value')[0]
        except IndexError:
            return None
        # VIEWSTATE/EVENTVALIDATION are base64 and must be url-encoded in full
        return data_str.format(quote(viewstate, safe=""), generator, quote(validation, safe=""))

    def _sxs_parse_rows(self, data_list, header):
        """Scrape every listing row plus its detail page and enqueue records."""
        for data_html in data_list:
            temporary = {}
            comp_name = data_html.xpath('./td[2]/text()')
            if not comp_name:
                continue
            temporary["comp_name"] = comp_name[0].strip()
            if len(temporary["comp_name"]) <= 3:
                continue
            comp_addr = data_html.xpath('./td[3]/text()')
            if not comp_addr:
                continue
            temporary["comp_addr"] = comp_addr[0].strip()
            detail_url = data_html.xpath('./td[6]/a/@href')
            if not detail_url:
                continue
            temporary["detail_url"] = "https://js.shaanxi.gov.cn:9010/SxApp/Share/Web/" + detail_url[0].strip()
            header["Referer"] = "https://js.shaanxi.gov.cn:9010/SxApp/Share/Web/RSSgqyList.aspx"
            second_response = self.get_cecond_lv_page(temporary["detail_url"], header, "get")
            if not second_response:
                continue
            second_response_html = etree.HTML(second_response.content.decode(second_response.apparent_encoding))
            reg_no = second_response_html.xpath('//span[@id="t_FLicence"]/text()')
            reg_no = reg_no[0].strip() if reg_no else ""
            org_no = second_response_html.xpath('//span[@id="t_FJuridcialCode"]/text()')
            # BUGFIX: the original re-tested `if reg_no:` here, so org_no could
            # stay a raw xpath list (or raise IndexError when the span is empty)
            org_no = org_no[0].strip() if org_no else ""
            if org_no == reg_no and reg_no != "":
                # identical licence / juridical numbers -> unified credit code
                temporary["credit_no"] = org_no
            else:
                temporary["org_no"] = org_no
                temporary["reg_no"] = reg_no
            oper_name = second_response_html.xpath('//span[@id="l1_FName"]/text()')
            if oper_name:
                temporary["oper_name"] = oper_name[0].strip()
            beian_province_name = "陕西省"
            beian_type = "本省" if "陕西" in temporary["comp_addr"] else "外省"
            source_name = "外省入陕建筑业企业登记信息查询"
            unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")), re.sub(r"\s", "", temporary.get("credit_no", "")), re.sub(r"\s", "", temporary.get("oper_name", "")), beian_province_name, beian_type, source_name)
            self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))

    # Gansu Province
    def run_GSS(self):
        """Page through the Gansu open-data API and enqueue company records.

        Every record is tagged 外省 with source 甘肃省住房和城乡建设厅 and pushed
        onto self.mysql_que; a None sentinel is appended to self.stop_list
        when the crawl finishes.
        """
        print("甘肃省开始")
        page_no, page_cap = 1, 100000
        header = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Host": "61.178.108.56:28800",
            "X-Requested-With": "XMLHttpRequest",
            "Referer": "http://61.178.108.56:28800/webserver/jjtxm/index.html"
        }
        province, source = "甘肃省", "甘肃省住房和城乡建设厅"
        while True:
            urls = "http://61.178.108.56:28800/DataServer/Api/OpenDataView?dataGuid=2f74e887-9f2c-44c1-9002-e0200c7216da&appKey=c9406731-50bc-4e23-b3f5-b7e7d3093875&sign=CXFnz3D2oWe660GObwSZag==&time=1628663300000&pagenum=1&limit=10&pagenum={}&limit={}".format(page_no, page_cap)
            response = self.get_cecond_lv_page(urls, header, "get")
            if not response:
                break
            body = response.json()
            if not body:
                break
            records = body["Data"]
            if not records:
                break
            for record in records:
                name = record.get("CORPNAME", "")
                # skip rows without a usable company name
                if not name or len(name) <= 3:
                    continue
                entry = {"comp_name": name}
                code = record.get("CORPCODE", "")
                if code:
                    entry["credit_no"] = code
                legal = record.get("LEGALMAN", "")
                if legal:
                    entry["oper_name"] = legal
                kind = "外省"
                unidd = self.get_unid(re.sub("\s", "", entry.get("comp_name", "")), re.sub("\s", "", entry.get("credit_no", "")), re.sub("\s", "", entry.get("oper_name", "")), province, kind, source)
                self.mysql_que.put((entry, province, kind, source, unidd))
            page_no += 1
            time.sleep(0.5)
            # a short page means the data set is exhausted
            if len(records) < page_cap:
                break
        self.stop_list.append(None)
        print("甘肃省结束")

    # Qinghai Province
    def run_QHS(self):
        """Crawl 青海省工程建设监管和信用管理平台 company listings.

        Page 1 is fetched with GET; later pages POST the pager form
        ($pg / $total / $pgsz). 备案地非青海省本地为入青企业: a company whose
        address is outside 青海 is tagged 外省. Row scraping is shared with
        _qhs_parse_rows.
        """
        print("青海省")
        pagenum = 1
        data_dict = {}
        numbers = 0  # total row count scraped from the pager element
        while True:
            header = {
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                "Accept-Encoding": "gzip, deflate",
                "Host": "139.170.150.135",
                "Pragma": "no-cache",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
            }
            url = "http://139.170.150.135/dataservice/query/comp/list"  # listing page
            if pagenum == 1:
                response = self.get_cecond_lv_page(url, header, "get")
                # BUGFIX: a failed first request used to raise AttributeError
                # on response.content
                if not response:
                    break
            else:
                if pagenum == 2:
                    # pager state initialised once; data_dict persists across pages
                    data_dict["$total"] = int(numbers)
                    data_dict["$reload"] = 0
                    data_dict["$pgsz"] = 15
                # BUGFIX: header is rebuilt every iteration, so the POST headers
                # must be re-added each pass (the original set them only on
                # page 2, leaving pages >= 3 without Content-Type/Origin/Referer)
                header["Referer"] = "http://139.170.150.135/dataservice/query/comp/list"
                header["Content-Type"] = "application/x-www-form-urlencoded"
                header["Origin"] = "http://139.170.150.135"
                data_dict["$pg"] = pagenum
                response = self.get_cecond_lv_page(url, header, "post", data_dict)
                if not response:
                    break
                if response.text == "":
                    break
            response_html = etree.HTML(response.content.decode(response.apparent_encoding))
            data_list = response_html.xpath('//table[@class="table_box"]//tr[@onclick]')
            try:
                # the total record count lives on the pager element
                numbers = response_html.xpath('//b[@class="datatimer"]/@data-to')[0]
            except:
                # page rendered without the pager — retry the same page
                time.sleep(1)
                continue
            if not data_list:
                break
            self._qhs_parse_rows(data_list, header)
            if len(data_list) < 15:
                break
            pagenum += 1
            time.sleep(0.5)
        self.stop_list.append(None)
        print("青海省结束")

    def _qhs_parse_rows(self, data_list, header):
        """Visit each listing row's detail page and enqueue the company record."""
        for data_html in data_list:
            temporary = {}
            detail_url = data_html.xpath('./@onclick')
            if not detail_url:
                continue
            # detail link is embedded in the row's onclick handler
            temporary["detail_url"] = "http://139.170.150.135" + detail_url[0].split(".href='")[1].replace("'", "")
            header["Referer"] = "http://139.170.150.135/dataservice/query/comp/list"
            second_response = self.get_cecond_lv_page(temporary["detail_url"], header, "get")
            if not second_response:
                continue
            second_response_html = etree.HTML(second_response.content.decode(second_response.apparent_encoding))
            comp_name = second_response_html.xpath('//span[@class="user-name"]/text()')
            if not comp_name:
                continue
            temporary["comp_name"] = comp_name[0].strip()
            if len(temporary["comp_name"]) <= 3:
                continue
            credit_no = second_response_html.xpath('//div[@class="bottom"]/dl[1]/dt/text()')
            if credit_no:
                temporary["credit_no"] = credit_no[0].strip()
            oper_name = second_response_html.xpath('//div[@class="bottom"]/dl[2]/dd/text()')
            if oper_name:
                temporary["oper_name"] = oper_name[0].strip()
            comp_addr = second_response_html.xpath('//div[@class="bottom"]/dl[4]/dd/text()')
            if comp_addr:
                temporary["comp_addr"] = comp_addr[0].strip()
            comp_type = second_response_html.xpath('//div[@class="bottom"]/dl[3]/dd/text()')
            if comp_type:
                temporary["comp_type"] = comp_type[0].strip()
            beian_province_name = "青海省"
            # BUGFIX: .get() — the first-page code used temporary["comp_addr"]
            # and raised KeyError when the detail page had no address
            beian_type = "本省" if "青海" in temporary.get("comp_addr", "") else "外省"
            source_name = "青海省工程建设监管和信用管理平台"
            unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")), re.sub(r"\s", "", temporary.get("credit_no", "")), re.sub(r"\s", "", temporary.get("oper_name", "")), beian_province_name, beian_type, source_name)
            self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))

    # Ningxia Hui Autonomous Region
    def run_NXHZZZQ(self):
        """Crawl 宁夏建筑市场监管服务系统 company lists per qualification type.

        ci_islocal_code JN02 selects out-of-province (入宁) firms; JN01
        (local firms) is currently disabled in the loop.
        ci_qualification_code cycles through the ids in type_list.
        """
        print("宁夏回族自治区启动")
        # rows (page size) is server-tunable; the site defaults to 15
        pagesize = 200
        type_list = [1, 2, 3, 4, 5, 6, 7, 10, 11, 12]
        for types in ["JN02"]:  # "JN01",
            for second_types in type_list:
                pagenum = 1
                header = {
                    "Accept": "*/*",
                    "Accept-Encoding": "gzip, deflate",
                    "Host": "222.75.70.198:28092",
                    "Origin": "http://222.75.70.198:28092",
                    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                    "Pragma": "no-cache",
                    "Referer": "http://222.75.70.198:28092/jzptweb/company_list.html?qualification=1&islocal=JN01",
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
                }
                url = "http://222.75.70.198:28092/portal.php?"  # listing endpoint
                while True:
                    # BUGFIX: the query string used to hard-code
                    # ci_qualification_code=1&ci_islocal_code=JN02, ignoring both
                    # loop variables, so every pass re-fetched the same data set
                    data_str = "page={}&resid=web_company.quaryCorp&ci_qualification_code={}&ci_islocal_code={}&rows={}".format(pagenum, second_types, types, pagesize)
                    response = self.get_cecond_lv_page(url, header, "get", data_str)
                    if not response:
                        break
                    if response.text == "":
                        break
                    response_text = re.sub(r"\s", "", response.text)
                    # BUGFIX: json.loads was unguarded (the old try wrapped only
                    # re.sub, which cannot fail); bad bodies killed the thread.
                    # run_NXHZZZQ2 already guards this the same way.
                    try:
                        data_list = json.loads(response_text)["data"]
                    except:
                        continue
                    if not data_list:
                        break
                    for every_data in data_list:
                        temporary = {}
                        comp_name = every_data.get("ci_name")
                        # guard: len(None) used to crash on rows without a name
                        if not comp_name or len(comp_name) <= 3:
                            continue
                        temporary["comp_name"] = comp_name
                        temporary["credit_no"] = every_data.get("ci_code", "")
                        temporary["comp_addr"] = every_data.get("ci_reg_addr", "")
                        temporary["comp_type"] = every_data.get("ci_qualification", "")
                        temporary["oper_name"] = every_data.get("ci_legal", "")
                        temporary["econ_kind"] = every_data.get("ci_nature", "")
                        beian_province_name = "宁夏回族自治区"
                        beian_type = "外省" if types == "JN02" else "本省"
                        source_name = "宁夏建筑市场监管服务系统"
                        # `or ""` shields re.sub from null field values
                        unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")), re.sub(r"\s", "", temporary.get("credit_no") or ""), re.sub(r"\s", "", temporary.get("oper_name") or ""), beian_province_name, beian_type, source_name)
                        self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                    pagenum += 1
                    time.sleep(0.5)
                    if len(data_list) < pagesize:
                        break
        self.stop_list.append(None)
        print("宁夏回族自治区结束")

    # Ningxia Hui Autonomous Region — registered (入宁) companies
    def run_NXHZZZQ2(self):
        """Crawl the registered 入宁 company list; records are always 外省."""
        pagenum = 1
        # rows per page; must match the stop condition below
        pagesize = 200
        header = {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate",
            "Host": "222.75.70.198:28092",
            "Origin": "http://222.75.70.198:28092",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Pragma": "no-cache",
            "Referer": "http://222.75.70.198:28092/jzptweb/company_jn_list.html",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
        }
        url = "http://222.75.70.198:28092/portal.php?"  # listing endpoint
        while True:
            # BUGFIX: rows was hard-coded to 15 while the stop condition compared
            # len(data_list) against pagesize (200), so the loop always ended
            # after a single 15-row page.
            # NOTE(review): the (unused) data_dict in the original requested
            # resid=web_company.quaryRegCorp, but this query string uses
            # quaryCorp — confirm which dataset is intended.
            data_str = "page={}&resid=web_company.quaryCorp&ci_qualification_code=1&ci_islocal_code=JN02&rows={}".format(pagenum, pagesize)
            response = self.get_cecond_lv_page(url, header, "get", data_str)
            if not response:
                break
            if response.text == "":
                break
            response_text = re.sub(r"\s", "", response.text)
            try:
                data_list = json.loads(response_text)["data"]
            except:
                continue
            if not data_list:
                break
            for every_data in data_list:
                temporary = {}
                comp_name = every_data.get("ci_name")
                # guard: len(None) used to crash on rows without a name
                if not comp_name or len(comp_name) <= 3:
                    continue
                temporary["comp_name"] = comp_name
                temporary["credit_no"] = every_data.get("ci_code")
                temporary["comp_addr"] = every_data.get("ci_reg_addr", "")
                temporary["oper_name"] = every_data.get("ci_legal_person", "")
                beian_province_name = "宁夏回族自治区"
                beian_type = "外省"
                source_name = "宁夏建筑市场监管服务系统"
                # `or ""` shields re.sub from a null ci_code
                unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")), re.sub(r"\s", "", temporary.get("credit_no") or ""), re.sub(r"\s", "", temporary.get("oper_name") or ""), beian_province_name, beian_type, source_name)
                self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
            if len(data_list) < pagesize:
                break
            pagenum += 1
            time.sleep(0.5)
        self.stop_list.append(None)

    # Xinjiang Uygur Autonomous Region
    def run_XJWWEZZZQ(self):
        """Crawl the Xinjiang engineering-construction cloud company list for
        both local ("lh") and out-of-province ("et") companies and enqueue
        the parsed rows for the MySQL writer.
        """
        print("新疆维吾尔自治区开始")
        pagesize = 10
        header = {
            "Accept": "application/json, text/plain, */*",
            "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
            "Host": "jsy.xjjs.gov.cn",
            "Origin": "https://jsy.xjjs.gov.cn",
            "Pragma": "no-cache",
            "Referer": "http://jsy.xjjs.gov.cn/compList",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
        }
        urls = "https://jsy.xjjs.gov.cn/api/corp/companyList"  # list API
        for types in ["lh", "et"]:
            data_dict = {
                "area": "",
                "areaType": types,
                "qualType": "",
                "complex": "",
                "corpName": "",
                "behaviour": "",
                "corpCode": "",
                "certName": "",
                "current": 1,
                "size": pagesize
            }
            pagenum = 1
            while True:
                data_dict["current"] = pagenum
                response = self.get_cecond_lv_page(urls, header, "post", data_dict)
                if not response:
                    break
                if response.text == "":
                    break
                data_list1 = response.json().get("data")
                if not data_list1:
                    break
                data_list = data_list1["records"]
                if not data_list:
                    break
                for every_data in data_list:
                    temporary = {}
                    temporary["comp_name"] = every_data.get("corpName")
                    # Skip rows with no usable name (old code crashed on None).
                    if not temporary["comp_name"] or len(temporary["comp_name"]) <= 3:
                        continue
                    # Normalize None to "" so re.sub below never receives None.
                    temporary["credit_no"] = every_data.get("corpCode") or ""
                    # NOTE(review): "ci_reg_addr" looks copied from the Ningxia
                    # crawler; this API probably uses a different key, so
                    # comp_addr is likely always "" -- confirm the field name.
                    temporary["comp_addr"] = every_data.get("ci_reg_addr", "")
                    temporary["oper_name"] = every_data.get("legalPerson", "")
                    if every_data.get("quals"):
                        temporary["comp_zz"] = ",".join(every_data.get("quals", ""))
                    beian_province_name = "新疆维吾尔自治区"
                    beian_type = "外省" if types == "et" else "本省"
                    source_name = "新疆工程建设云"
                    unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")),
                                          re.sub(r"\s", "", temporary.get("credit_no", "")),
                                          re.sub(r"\s", "", temporary.get("oper_name", "")),
                                          beian_province_name, beian_type, source_name)
                    self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                # BUGFIX: single last-page check (it was duplicated both before
                # and after the sleep, making the second copy unreachable).
                if len(data_list) < pagesize:
                    break
                pagenum += 1
                time.sleep(1)
        self.stop_list.append(None)
        print("新疆维吾尔自治区结束")

    # Guangxi Zhuang Autonomous Region
    def run_GXZZZZQ(self):
        """Crawl the Guangxi construction-market cloud platform for every
        enterprise category and enqueue the parsed rows for the MySQL writer.

        The site caps pagination at 5 pages per query; pageSize is adjustable
        (default 20, max 100), so 100 is used to maximise coverage.
        """
        print("广西壮族自治区启动")
        # entType codes (empty string = all categories):
        #   jzy  construction enterprises       jj    project supervision
        #   zljc quality-inspection agencies    zbdl  bidding agencies
        #   yllh landscaping enterprises        kcsj  survey & design
        #   sgts construction-drawing review    xf    fire-inspection agencies
        #   fljc lightning-protection inspection
        pageseze = 100
        header = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Host": "gxjzsc.caihcloud.com",
            "Origin": "http://gxjzsc.caihcloud.com",
            "Pragma": "no-cache",
            "Referer": "",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest"
        }
        url = "http://gxjzsc.caihcloud.com/ythwz/web/ent/getEnterpriseList"  # list endpoint
        for types in ["", "jzy", "jj", "zljc", "zbdl", "yllh", "kcsj", "sgts", "xf", "fljc"]:
            pagenum = 1
            while True:
                # The site expects a millisecond timestamp in both the Referer
                # and the form body.
                times2 = int((time.time() * 1000)) - 2
                header["Referer"] = f"http://gxjzsc.caihcloud.com/ythwz/webHtml/ent/index.html?n=ent&t={times2 - 1}"
                data_str = "pageNo={}&pageSize={}&entName=&entType={}&province=&city=&county=&t={}".format(
                    pagenum, pageseze, types, times2)
                response = self.get_cecond_lv_page(url, header, "post", data_str)
                if not response:
                    break
                data_list = response.json().get("list")
                if not data_list:
                    break
                for every_data in data_list:
                    temporary = {}
                    comp_name = every_data.get("name")
                    if not comp_name or len(comp_name.strip()) <= 3:
                        continue
                    temporary["comp_name"] = comp_name.strip()
                    credit_no = every_data.get("businessLicense", "")
                    if credit_no:
                        temporary["credit_no"] = credit_no.strip()
                    comp_addr = every_data.get("address", "")
                    if comp_addr:
                        temporary["comp_addr"] = comp_addr.strip()
                    oper_name = every_data.get("legalPerson", "")
                    if oper_name:
                        temporary["oper_name"] = oper_name.strip()
                    temporary["enterpriseId"] = every_data.get("enterpriseId", "")
                    beian_province_name = "广西壮族自治区"
                    # BUGFIX: provinceName can be missing/None, which made the
                    # `in` test raise TypeError and kill the crawler thread.
                    beian_type = "本省" if "广西" in (every_data.get("provinceName") or "") else "外省"
                    source_name = "广西建筑市场监管云平台"
                    unidd = self.get_unid(re.sub(r"\s", "", temporary.get("comp_name", "")),
                                          re.sub(r"\s", "", temporary.get("credit_no", "")),
                                          re.sub(r"\s", "", temporary.get("oper_name", "")),
                                          beian_province_name, beian_type, source_name)
                    self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                if len(data_list) < pageseze:
                    break
                pagenum += 1
                time.sleep(1)
        self.stop_list.append(None)
        print("广西壮族自治区结束")

    # Anhui Province
    def run_AHS(self):
        """Crawl the Anhui provincial construction authority's company search
        (type 1 = in-province, type 2 = out-of-province) and enqueue rows for
        the MySQL writer.  Appends a None sentinel to self.stop_list when done.
        """
        print("安徽省启动")
        request_headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Connection": "keep-alive",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Cookie": "SHIROJSESSIONID=e80ac335-4d2b-465a-8dcc-b033884dfd60",
            "Host": "dohurd.ah.gov.cn",
            "Origin": "http://dohurd.ah.gov.cn",
            "Referer": "http://dohurd.ah.gov.cn/site/tpl/9311",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest"
        }
        # The server honours an oversized pagesize, so a single request
        # normally returns the whole listing for each type.
        page_cap = 200000
        for comp_scope in (1, 2):
            page_no = 1
            while True:
                payload = (
                    f"pagesize={page_cap}&pageindex={page_no}&type={comp_scope}"
                    "&CorpCode=&CorpName=&LegalMan=&CertTypeNum=&txt1=&AreaCode="
                )
                endpoint = f"http://dohurd.ah.gov.cn/epoint-mini/rest/function/searchSNQY?IsAjax=1&_={random.random()}"
                response = self.get_cecond_lv_page(endpoint, request_headers, "post", payload)
                if not response or response.text == "":
                    break
                rows = response.json()["all"]["listinfo"]
                if not rows:
                    break
                for row in rows:
                    record = {}
                    name = row["corpname"]
                    # Skip empty or implausibly short company names.
                    if not name or len(name) <= 3:
                        continue
                    record["comp_name"] = name
                    record["credit_no"] = row["corpcode"]
                    record["oper_name"] = row["legalman"]
                    province = "安徽省"
                    scope_label = "本省" if comp_scope == 1 else "外省"
                    source = "安徽省住房和城乡建设厅"
                    unidd = self.get_unid(re.sub("\s", "", record["comp_name"]),
                                          record.get("credit_no", ""),
                                          record.get("oper_name", ""),
                                          province, scope_label, source)
                    self.mysql_que.put((record, province, scope_label, source, unidd))
                page_no += 1
                time.sleep(0.5)
                if len(rows) < page_cap:
                    break
        self.stop_list.append(None)
        print("安徽省启动结束")

    # Shanghai
    def run_SHS(self):
        """Crawl the Shanghai construction-market platform company list
        (types: 1 = local, 2 = out-of-province), fetch each company's detail
        page and enqueue the parsed record for the MySQL writer.
        """
        print("上海市开始")
        for types in [1, 2]:
            pagenum = 1
            header = {
                "Accept": "*/*",
                "Accept-Encoding": "gzip, deflate, br",
                "Content-Type": "application/x-www-form-urlencoded",
                "Host": "ciac.zjw.sh.gov.cn",
                "Origin": "https://ciac.zjw.sh.gov.cn",
                "Pragma": "no-cache",
                "Referer": "https://ciac.zjw.sh.gov.cn/CreditManualnew/PublicCompany/SearchIndex",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
            }
            url = "https://ciac.zjw.sh.gov.cn/CreditManualNew/PublicCompany/SearchList"  # paged list
            second_url = "https://ciac.zjw.sh.gov.cn/CreditManualNew/PublicCompany/Qyxx"  # company detail
            while True:
                data_str = "dwmc=&qylx={}&zzdl=&zzxl=null&zzlb=null&zzdj=null&page={}".format(types, pagenum)
                header["Referer"] = url
                response = self.get_cecond_lv_page(url, header, "post", data_str)
                if not response:
                    break
                if response.text == "":
                    break
                response_html = etree.HTML(response.content.decode(response.apparent_encoding))
                data_list = response_html.xpath('//table[@class="defaultTable tb_Content"]//tr/td[2]/a')
                if not data_list:
                    break
                for data_html in data_list:
                    temporary = {}
                    try:
                        openwindow = data_html.xpath('./@onclick')[0]
                    except IndexError:
                        continue
                    # The onclick holds "openwindow(dwid, dwdm, ...)"; keep digits/commas.
                    link_data = re.sub("[^0-9,]", "", openwindow).split(",")
                    second_data = {"dwid": link_data[0]}
                    header["Referer"] = f"https://ciac.zjw.sh.gov.cn/CreditManualNew/PublicCompany/ShIndex?dwid={link_data[0]}&dwdm={link_data[1]}"
                    second_reponse = self.get_cecond_lv_page(second_url, header, "post", second_data)
                    if not second_reponse:
                        continue
                    # BUGFIX: decode the detail response with its OWN encoding;
                    # the old code reused the list page's apparent_encoding.
                    second_reponse_html = etree.HTML(second_reponse.content.decode(second_reponse.apparent_encoding))
                    try:
                        temporary["comp_name"] = second_reponse_html.xpath('//table[@class="defaultTable2"]//tr[1]/td[2]/text()')[0].strip()
                        if len(temporary.get("comp_name")) <= 3:
                            continue
                        temporary["credit_no"] = ""
                    except IndexError:
                        continue
                    # BUGFIX: guard the xpath lookups -- a missing cell used to
                    # raise IndexError and abort the whole page.
                    comp_type = second_reponse_html.xpath('//table[@class="defaultTable2"]//tr[2]/td[2]/text()')
                    if comp_type and comp_type[0].strip():
                        temporary["comp_type"] = comp_type[0].strip()
                    comp_addr = second_reponse_html.xpath('//table[@class="defaultTable2"]//tr[3]/td[2]/text()')
                    if comp_addr and comp_addr[0]:
                        # BUGFIX: the old code applied re.sub to comp_addr[0]
                        # of an already-extracted string, storing only the
                        # FIRST CHARACTER of the address.
                        temporary["comp_addr"] = re.sub(r"\s", "", comp_addr[0])
                    beian_province_name = "上海市"
                    beian_type = "外省" if types == 2 else "本省"
                    source_name = "上海市建设市场信息服务平台"
                    unidd = self.get_unid(re.sub(r"\s", "", temporary["comp_name"]), temporary.get("credit_no", ""), temporary.get("oper_name", ""), beian_province_name, beian_type, source_name)
                    self.mysql_que.put((temporary, beian_province_name, beian_type, source_name, unidd))
                if len(data_list) < 20:
                    break
                pagenum += 1
                time.sleep(0.5)
        self.stop_list.append(None)
        print("上海市结束")

    # Zhejiang Province -- selenium-driven; the browser window must stay open while it runs
    def run_ZJS2(self):
        # Crawl the "进浙企业" (out-of-province companies registered in
        # Zhejiang) listing with a real Chrome window, pushing
        # (record, province, type, source, unid) tuples onto self.mysql_que.
        # Appends a None sentinel to self.stop_list when finished.
        print("浙江省开始")
        pagesize = 15  # rows rendered per page; used for the last-page check below
        urls = "https://jzsc.jst.zj.gov.cn/PublicWeb/index.html#/"
        chromedriver = r"D:/QG/config_setting/chromedriver.exe"  # NOTE: machine-specific path
        os.environ["webdriver.chrome.driver"] = chromedriver
        chrome_options = webdriver.ChromeOptions()
        # chrome_options.add_argument('headless')
        # chrome_options.add_argument('single-process')
        chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
        chrome_options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})  # stop loading images
        chrome_options.add_argument("--start-maximized")  # maximize the window
        driver = webdriver.Chrome(chromedriver, chrome_options=chrome_options)
        # Hide navigator.webdriver so the site's bot detection does not trip.
        driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": """Object.defineProperty(navigator, 'webdriver', {get: () => undefined }) """})
        driver.get(urls)
        pagenum = 1
        tries = 1  # consecutive-failure counter; the loop gives up past 20
        while True:
            htmls = driver.page_source
            # Wait until the landing page has rendered its menu entries.
            if "进浙企业" not in htmls:
                time.sleep(1)
                driver.refresh()
                continue
            if pagenum == 1:
                # First pass: open the listing via its menu tile.
                driver.find_element_by_xpath('//div[@class="item icon1"]').click()
            else:
                # Later passes: advance via the paginator's "next" button.
                driver.find_element_by_xpath('//button[@class="btn-next"]').click()
            if tries > 20:
                break
            time.sleep(1)
            htmls = driver.page_source
            html_etree = etree.HTML(htmls)
            data_list = html_etree.xpath('//table[@class="el-table__body"]//tr')
            if not data_list:
                try:
                    # Table still loading: wait a long while, then reload.
                    if '<p class="el-loading-text">Loading</p>' in htmls:
                        time.sleep(60)
                        driver.refresh()
                        continue
                except:
                    pass
                break
            else:
                try:
                    # A disabled "next" button marks the final page.
                    # NOTE(review): this breaks out BEFORE the rows of the
                    # final page are processed below, so the last page's data
                    # is dropped; the pagesize = 16 assignment is dead code.
                    if 'class="btn-next" disabled="disabled"' in htmls:
                        pagesize = 16
                        break
                except:
                    pass
            if tries > 1:
                tries = 1
            for data_html in data_list:
                temporary_dict = {}
                comp_name = data_html.xpath('./td[2]//text()')
                if not comp_name:
                    continue
                if len(comp_name[0]) <= 3:
                    continue
                temporary_dict["comp_name"] = comp_name[0].strip()
                credit_no = data_html.xpath('./td[3]//text()')
                if credit_no:
                    temporary_dict["credit_no"] = credit_no[0].strip()
                beian_province_name = "浙江省"
                beian_type = "外省"  # this listing only contains out-of-province companies
                source_name = "浙江省建筑市场监管公共服务系统"
                # print(temporary_dict, pagenum, end="")
                unidd = self.get_unid(re.sub("\s", "", temporary_dict.get("comp_name", "")),
                                      re.sub("\s", "", temporary_dict.get("credit_no", "")),
                                      re.sub("\s", "", temporary_dict.get("oper_name", "")), beian_province_name,
                                      beian_type, source_name)
                self.mysql_que.put((temporary_dict, beian_province_name, beian_type, source_name, unidd))
            if len(data_list) < pagesize:
                break
            pagenum += 1
            time.sleep(1)
        # NOTE(review): the driver is intentionally not quit here -- see the
        # header comment about the window staying open.
        print("浙江省结束")
        self.stop_list.append(None)

    # Hubei Province -- selenium-driven; the browser window must stay open while it runs
    def run_HBS3(self):
        # Crawl the Hubei construction-market platform company list with a
        # real Chrome window.  From page 2 onward the method clears browser
        # data via chrome://settings, driven by raw PyMouse clicks and
        # PyKeyboard taps at FIXED screen coordinates.
        # NOTE(review): the m.click(...) coordinates assume a specific screen
        # resolution and window layout -- confirm on the target machine.
        urls = "http://jg.hbcic.net.cn/web/QyManage/QyList.aspx"
        pagesize = 20  # data rows per page; used for the last-page check below
        chromedriver = r"D:/QG/config_setting/chromedriver.exe"  # NOTE: machine-specific path
        os.environ["webdriver.chrome.driver"] = chromedriver
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
        chrome_options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})    # stop loading images
        chrome_options.add_argument("--start-maximized")      # maximize the window
        driver = webdriver.Chrome(chromedriver, chrome_options=chrome_options)
        # Hide navigator.webdriver from the site's bot detection.
        driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {"source":"""Object.defineProperty(navigator, 'webdriver', {get: () => undefined }) """})
        k = PyKeyboard()
        m = PyMouse()
        driver.get(urls)
        pagenum = 1
        tries = 1  # consecutive-failure counter; the loop gives up past 20
        data_dict = {}
        while True:
            if pagenum >= 2:
                if pagenum == 2:
                    # Open a second tab via JS; it is reused for the
                    # clear-browser-data step on every subsequent page.
                    js = 'window.open("https://www.baidu.com/")'
                    driver.execute_script(js)
                    handles = driver.window_handles
                # `handles` is bound on the pagenum == 2 pass and reused afterwards.
                driver.switch_to.window(handles[-1])
                time.sleep(0.5)
                driver.get("chrome://settings/clearBrowserData")
                time.sleep(0.5)
                # Click the "clear data" confirmation button (fixed coordinates).
                m.click(1160, 790, 1, 1)
                time.sleep(0.5)
                driver.switch_to.window(handles[0])
                time.sleep(1)
                # Scroll the listing page down to the paginator, then click "next".
                k.tap_key(k.page_down_key)
                time.sleep(0.2)
                k.tap_key(k.page_down_key)
                time.sleep(0.2)
                k.tap_key(k.page_down_key)
                time.sleep(0.2)
                m.click(1300, 955, 1, 1)
                time.sleep(1)
            if tries > 20:
                break
            htmls = driver.page_source
            html_etree = etree.HTML(htmls)
            try:
                # ASP.NET postback state; collected here but not used further yet.
                data_dict["VIEWSTATE"] = html_etree.xpath('//input[@id="__VIEWSTATE"]/@value')[0]
                data_dict["EVENTVALIDATION"] = html_etree.xpath('//input[@id="__EVENTVALIDATION"]/@value')[0]
            except:
                time.sleep(1)
                tries += 1
                continue
            data_list = html_etree.xpath('//table[@filterid="filter"]//tr')
            if not data_list:
                tries += 1
                continue
            if tries > 1:
                tries = 1
            # Skip the leading header rows; each page carries 20 data rows.
            for data_html in data_list[3:23]:
                temporary_dict = {}
                comp_name = data_html.xpath('./td/a/text()')
                if not comp_name:
                    continue
                if len(comp_name[0]) <= 3 or "企业名称" in comp_name[0]:
                    continue
                temporary_dict["comp_name"] = comp_name[0].strip()
                credit_no = data_html.xpath('./td[3]/text()')
                if credit_no:
                    temporary_dict["credit_no"] = credit_no[0].strip()
                oper_name = data_html.xpath('./td[4]/text()')
                if oper_name:
                    temporary_dict["oper_name"] = oper_name[0].strip()
                comp_addr = data_html.xpath('./td[5]/text()')
                if comp_addr:
                    temporary_dict["comp_addr"] = comp_addr[0].strip()
                beian_province_name = "湖北省"
                # Classified as local when the address matches a known Hubei city.
                beian_type = "本省" if temporary_dict.get("comp_addr") in address_json["湖北省"].keys() else "外省"
                source_name = "湖北省建筑市场监督与诚信一体化工作平台"
                # print(temporary_dict, pagenum, beian_type)
                unidd = self.get_unid(re.sub("\s", "", temporary_dict.get("comp_name", "")), re.sub("\s", "", temporary_dict.get("credit_no", "")), re.sub("\s", "", temporary_dict.get("oper_name", "")), beian_province_name, beian_type, source_name)
                self.mysql_que.put((temporary_dict, beian_province_name, beian_type, source_name, unidd))
            # NOTE(review): the slice differs from the loop above ([2:23] vs
            # [3:23]), so this exit test counts one extra row -- confirm intent.
            if len(data_list[2:23]) < pagesize:
                break
            pagenum += 1
        try:
            driver.quit()
        except:
            pass

    # Hubei Province -- HTTP-only variant; the site's cookies expire quickly
    def run_HBS3_copy(self):
        """Experimental HTTP-only variant of run_HBS3 (work in progress).

        The listing sits behind short-lived anti-bot cookies, so this stub
        only fetches the first page and prints the raw HTML for manual
        inspection; nothing is parsed or queued yet.
        BUGFIX: removed a leftover pdb.set_trace() that blocked the thread
        whenever this method was invoked, plus the dead pagination scaffolding
        around it.
        """
        urls = "http://jg.hbcic.net.cn/web/QyManage/QyList.aspx"
        header = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Host": "jg.hbcic.net.cn",
            "Referer": "http://jg.hbcic.net.cn/web/Head.htm",  # site home page
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
        }
        # TODO: acquire fresh cookies automatically before requesting page 2+.
        response = requests.get(url=urls, headers=header)
        print(response.text)

    # Guizhou Province -- searchable by name only; Guizhou's qualification and
    # safety-licence data are deliberately not collected for now
    def run_GZS(self):
        # Placeholder: only the portal entry URL is recorded; no crawling yet.
        urls = "http://61.243.11.50:8088/GZZHXT/MHWeb/QData/QData.html"

    # Jiangsu -- searchable by name only (crawler not implemented)
    def run_JSS(self):
        # Placeholder: the bare string below just records the target URL for
        # reference; no crawling is implemented yet.
        "http://58.213.147.230:7001/Jsjzyxyglpt/faces//public/zhcx5c/qycxList.jsp?dataType=website-zhcx-qycx&menucode=019001"

    # Sichuan -- searchable by name only (crawler not implemented)
    def run_SCS(self):
        # Placeholder: the bare strings below record two candidate data-source
        # URLs for reference; no crawling is implemented yet.
        "http://202.61.88.188/xxgx/Enterprise/eList.aspx"
        "http://jst.sc.gov.cn/scjst/xhtml/dataShow.html?l=datashow"

    # Tianjin -- requires a captcha "secondKey" token and a JWT access token; WIP
    def run_TJS(self):
        """Experimental single-shot probe of the Tianjin list API.

        Both the hard-coded secondKey captcha token and the X-Access-Token JWT
        expire quickly, so this only issues one request and prints the raw
        response for manual inspection; nothing is parsed or queued yet.
        BUGFIX: removed a leftover pdb.set_trace() that froze the process and
        the dead pagination scaffolding around it.
        """
        times2 = int(time.time()) - 2
        token = "RUNNING:CAPTCHA:second-l4VrehOktBwYwabiv5lraHB6EgAl69wY1WV0t6ugPjtKqy0HFY4FWHXOgbLzKfjgPJMIO+UGYT59Pzf9tZHtiA=="
        header = {
            "Accept": "application/json, text/plain, */*",
            "Host": "60.29.202.86:8090",
            "Referer": "http://60.29.202.86:8090/",
            "X-Access-Token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE2NTI5Mjk3NTUsInVzZXJuYW1lIjoiZ2djeCJ9.C3AwxSVDTmzxsXhjE0mrjzqi5AIxHjIS7CL6e6msI-E",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
        }
        # pageSize is adjustable (server default is 10); the URL below carries
        # an older, URL-encoded secondKey captured from the browser.
        url = f"http://60.29.202.86:8090/ggcx-service/zjw/viewBsdtQyJbxx/list?_t={times2}&column=id&order=desc&field=id,,,qyshtyxydm,qymc,qyzcszx,frdb,qyzcdz,action&pageNo=2&pageSize=10000&secondKey=RUNNING:CAPTCHA:second-8kb5%2B6m3cl9EXDX0AB4bw3uaZj%2BeRqSLdTuCtBWr3aOUg3cdPGj8kIk%2BSOk80VMtMQ0qqEno495JzqWQFSbD3A%3D%3D"
        data_str = f"_t={times2}&column=id&order=desc&field=id,,,qyshtyxydm,qymc,qyzcszx,frdb,qyzcdz,action&pageNo=2&pageSize=10&secondKey={quote(token, safe='')}"
        # NOTE(review): sending a body with GET is unusual but mirrors the
        # request the site's frontend issues.
        response = requests.get(url=url, headers=header, data=data_str, verify=False)
        print(response.text)

    # Hunan Province -- needs a manually supplied moveX + verifyid (slider captcha); pageSize is adjustable
    def run_HNS(self):
        """Poll the Hunan engineering-project platform company list.

        The endpoint sits behind a slider captcha: the operator solves it in a
        browser and pastes the resulting moveX and verifyid values when
        prompted.  BUGFIX: removed a debug line that overwrote the operator's
        input with a hard-coded (long-expired) moveX/verifyid pair, which made
        the interactive prompt useless.
        """
        print("湖南省启动")
        # Search page: https://gcxm.hunanjs.gov.cn/dataservice.html
        url2 = 'https://gcxm.hunanjs.gov.cn/AjaxHandler/PersonHandler.ashx'
        headers_copy = {
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Encoding': 'gzip, deflate, br',
            'Cookie': 'ASP.NET_SessionId=ecok1bslcwg34ui42jp23tot',
            'Host': 'gcxm.hunanjs.gov.cn',
            'Pragma': 'no-cache',
            'Referer': 'https://gcxm.hunanjs.gov.cn/dataservice.html',
            'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36',
            'X-Requested-With': 'XMLHttpRequest'
        }
        # Prompt until the operator supplies two non-empty comma-separated values.
        while True:
            input_movex = input("输入movex与verifyid参数，英文逗号隔开：")
            try:
                movex_verifyid = input_movex.split(",")
                if movex_verifyid[0] and movex_verifyid[1]:
                    break
            except IndexError:
                continue
        # NOTE(review): this loop has no exit condition -- it polls the
        # endpoint once per second until the process is killed, as before.
        while True:
            headers_copy["User-Agent"] = random.choice(user_agent)
            datas = "method=GetListPage&type=1&corptype_1=&corpname_1=&licensenum_1=&Province_1=430000&City_1=&county_1=&persontype=&persontype_2=&personname_2=&idcard_2=&certnum_2=&corpname_2=&prjname_3=&corpname_3=&prjtype_3=&cityname_3=&corpname_5=&corpcode_5=&legalman_5=&cityname_5=&SafeNum_6=&corpname_6=&corpname_7=&piciname_7=&corptype_7=&corpname_8=&corpcode_8=&legalman_8=&cityname_8=&pageSize=30&pageIndex=1&moveX={}&verifyid={}".format(
                movex_verifyid[0], movex_verifyid[1])
            response = requests.get(url=url2, headers=headers_copy, params=datas)
            print(response.text)
            time.sleep(1)

    # Heilongjiang -- captcha-protected; crawler unfinished
    def run_HLJ(self):
        """Partial crawler for the Heilongjiang public enterprise list (WIP).

        Loads the search page, extracts the ASP.NET postback fields and the
        province codes from the filter dropdown.  The paginated POST cannot be
        sent yet because it requires a solved captcha (txtCheckCode), so only
        the form string is assembled for each province.
        BUGFIX: the old inner `while True` around the form assembly had no
        break statement and hung the thread on the first province.
        """
        url = "http://219.147.76.5:7507/Dop/Open/EnterpriseList.aspx"
        header = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Accept-Encoding": "gzip, deflate",
            "Content-Type": "application/x-www-form-urlencoded",
            "Host": "219.147.76.5:7507",
            "Origin": "http://219.147.76.5:7507",
            "Referer": "http://219.147.76.5:7507/Dop/Open/EnterpriseList.aspx"}
        while True:
            response = self.get_cecond_lv_page(url, header, "get")
            if not response:
                break
            if response.text == "":
                break
            response_html = etree.HTML(response.content.decode(response.apparent_encoding))
            try:
                # NOTE(review): these xpaths select the <input> ELEMENTS, not
                # their @value attributes -- the values still need extracting
                # before the form can be posted.
                __EVENTTARGET = response_html.xpath('//input[@id="__EVENTTARGET"]')[0]
                __VIEWSTATE = response_html.xpath('//input[@id="__VIEWSTATE"]')[0]
                __VIEWSTATEGENERATOR = response_html.xpath('//input[@id="__VIEWSTATEGENERATOR"]')[0]
            except IndexError:
                time.sleep(1)
                continue
            provices = response_html.xpath(
                '//select[@id="ctl00_ContentPlaceHolder1_ddlProvice"]/option[not(@selected)]/@value')
            for provice in provices:
                # TODO: solve the captcha, substitute the real __VIEWSTATE /
                # __VIEWSTATEGENERATOR / province values into data_str, POST
                # it, and paginate; none of that is implemented yet.
                data_dict = {"ctl00$ContentPlaceHolder1$hidCheckCodeMark": "{}.7336645792567".format(
                    random.randint(350, 800))}
                pagenum = 1
                pagesize = 20
                data_str = "__EVENTTARGET=ctl00%24ContentPlaceHolder1%24AspNetPager1&__EVENTARGUMENT={}&__LASTFOCUS=&__VIEWSTATE={}&__VIEWSTATEGENERATOR={}&ctl00%24ContentPlaceHolder1%24txtOrgName=&ctl00%24ContentPlaceHolder1%24ddlProvice={}&ctl00%24ContentPlaceHolder1%24txtCreditCode=&ctl00%24ContentPlaceHolder1%24txtOrgCode=&ctl00%24ContentPlaceHolder1%24ddlCity=&ctl00%24ContentPlaceHolder1%24txtCheckCode=&ctl00%24ContentPlaceHolder1%24hidCheckCodeMark=134.97529683234654"
            break

    def run_selenium(self):
        # Run the selenium-driven provincial crawlers sequentially (each one
        # drives a real browser window, so they cannot run concurrently),
        # then signal the consumer threads that this producer has finished.
        self.run_ZJS2()
        self.run_HBS3()
        self.stop_list.append(None)



def test():
    """Manually probe the ggcx-service company-listing endpoint.

    Repeatedly issues the same GET request (with a form-encoded body, as the
    upstream service apparently expects) and stops as soon as the response is
    a non-2xx status or an empty body. Network-only; returns nothing.
    """
    # The token and request parameters never change between iterations,
    # so build them once outside the loop.
    tokens = "LRpx8vCT7mt/JwKr7+2ssyqeiOUFFvEGs+XaLWW+zW9WxTVDRbkUiBJ6rMlLbho4Ddn4TmoCE7H6GNJ8RoHk/g=="
    encoded_token = quote(tokens, safe="")
    url = "http://60.29.202.86:8090/ggcx-service/zjw/viewBsdtQyJbxx/list?_t=1653729271&column=id&order=desc&field=id,,,qyshtyxydm,qymc,qyzcszx,frdb,qyzcdz,action&pageNo=1&pageSize=10&secondKey=RUNNING:CAPTCHA:second-{}".format(encoded_token)
    header = {
        "Accept": "application/json, text/plain, */*",
        "Host": "60.29.202.86:8090",
        "Referer": "http://60.29.202.86:8090/",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.67 Safari/537.36",
        "X-Access-Token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE2NTM3MzA5MTAsInVzZXJuYW1lIjoiZ2djeCJ9.S6vzO8s2AjuIBM72qZxduikNm9cb-btQqALAyrT8kBk"
    }
    data_str = "_t=1653729271&column=id&order=desc&field=id,,,qyshtyxydm,qymc,qyzcszx,frdb,qyzcdz,action&pageNo=1&pageSize=1&secondKey=RUNNING:CAPTCHA:second-{}".format(encoded_token)
    while True:
        # NOTE: leftover pdb.set_trace() and a dead `response.raw` statement
        # were removed here — they blocked/did nothing in unattended runs.
        response = requests.get(url=url, headers=header, data=data_str)
        if not response:  # requests.Response is falsy for 4xx/5xx statuses
            break
        if response.text == "":
            break

if __name__ == '__main__':
    # test()
    beian = beian_obj()
    # One crawler entry point per provincial platform. The selenium-based
    # runners (run_SXS_inner, run_selenium) are deliberately excluded here;
    # run_HBS3 is invoked separately after the thread pool drains.
    sorc_plat = [beian.run_BJS, beian.run_BJS2, beian.run_SXS, beian.run_SXS3, beian.run_HeBS, beian.run_NMG, beian.run_LNS, beian.run_JLS, beian.run_FJS, beian.run_JXS, beian.run_GDS, beian.run_HaiNS, beian.run_CQS_copy, beian.run_CQS2, beian.run_CQS3, beian.run_YNS, beian.run_XZZZQU, beian.run_ShanXS, beian.run_GSS, beian.run_QHS, beian.run_NXHZZZQ, beian.run_NXHZZZQ2, beian.run_XJWWEZZZQ, beian.run_HeNS, beian.run_GXZZZZQ, beian.run_AHS, beian.run_SHS, beian.run_SDS, beian.run_SDS_outside, beian.run_HBS2]
    thread_list = [Thread(target=every_plat) for every_plat in sorc_plat]
    print(f"开始时间：{datetime.now()}")

    for worker in thread_list:
        # Thread.setDaemon() is deprecated since Python 3.10; set the
        # attribute directly instead.
        worker.daemon = True
        worker.start()

    # Persist results concurrently while the crawler threads run.
    # NOTE(review): this writer thread is never joined, so it dies with the
    # process once the crawlers finish — presumably intentional best-effort
    # draining; confirm against save_mysql's semantics.
    save_mysql_thread = Thread(target=beian.save_mysql, args=(1,))
    save_mysql_thread.start()

    for worker in thread_list:
        worker.join()

    print(f"结束时间：{datetime.now()}")
    # Final sequential pass for the HBS3 platform, run after all others.
    beian.run_HBS3()