# -*- coding:utf-8 -*-
import asyncio
import logging

from save_mysql import *

import re
from urllib import parse
import os
import json
import requests
from lxml import etree
import time

class Spider(object):
    def __init__(self, cookies, value, proxy=None):
        """Store the crawl session state.

        Args:
            cookies: requests-compatible cookie mapping for a logged-in
                tianyancha.com session.
            value: company name / keyword to search for.
            proxy: optional requests ``proxies`` mapping. Defaults to the
                historical hard-coded HTTP proxy for backward compatibility.

        NOTE(review): the default maps only the ``'http'`` scheme while every
        request in ``run`` targets https URLs, so by default the proxy is
        never actually applied — confirm whether that is intended.
        """
        self.cookies = cookies
        self.value = value
        self.proxy = {'http': '120.83.120.72:9999'} if proxy is None else proxy

    def run(self):
        """Main crawl entry point.

        Searches tianyancha.com for ``self.value``, follows the first matching
        company detail page, scrapes the basic-information table and the
        dishonest-person (失信人) sections, and persists everything through
        SaveMySQL / ReadMySQL.

        Returns:
            ``(error, message)`` tuple: ``(None, None)`` on success, otherwise
            the exception args (or ``None``) plus a Chinese status message.

        NOTE(review): ~25 additional section scrapers (主要人员, 股东信息, 对外投资, …)
        existed here only as commented-out dead code and have been removed;
        recover them from version control if they are ever reinstated.
        """
        mysql = SaveMySQL()  # target-database writer
        dicts = {'key': f'{self.value}'}  # urlencode requires a mapping
        keyword = parse.urlencode(dicts)
        url = 'https://www.tianyancha.com/search?' + keyword + '&checkFrom=searchBox'
        print(url)
        # Accept header fixed: the original value had spaces inserted around
        # every '/', ',' and '=', which made the media types malformed.
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Referer': f'https://antirobot.tianyancha.com/captcha/verify?return_url=https%3A%2F%2Fwww.tianyancha.com%2Fsearch%3Fkey%3D{self.value}%26checkFrom%3DsearchBox&rnd=',
            'Host': 'www.tianyancha.com'}
        time.sleep(2)  # throttle to reduce anti-bot triggers
        # NOTE(review): self.proxy maps only the 'http' scheme, so these https
        # requests bypass the proxy entirely — confirm intent.
        response = requests.get(url, headers=headers, cookies=self.cookies, proxies=self.proxy)
        now_url = response.url
        if 'https://www.tianyancha.com/login' in now_url:
            # Redirected to the login page: session cookies are no longer valid.
            print('cookies过期请重新登录')
            return None, 'cookies过期请重新登录'
        re_rule = re.compile(r'https://www\.tianyancha\.com/company/\d*')
        try:
            url = re_rule.findall(response.text)[0]  # first company detail-page URL
        except IndexError as e:
            # No company link in the page — usually a captcha/manual-verify wall.
            return e.args, '需要手工登录网页进行验证'
        print(url)
        SYSID = url.split('/')[-1]  # numeric company id at the end of the URL
        time.sleep(2)
        response = requests.get(url, headers=headers, proxies=self.proxy, cookies=self.cookies)
        text = response.text
        html = etree.HTML(text)
        data_list = []
        try:
            # basic_info: flat field list; gszch: business registration number;
            # tyxydm: unified social credit code; company_name: company name.
            basic_info, gszch, tyxydm, company_name = self.get_basic_information(html, SYSID)
        except Exception as e:
            return e.args, '基本信息'
        if gszch == '-':
            return None, '没有工商注册号'
        print(f'基本信息:  {basic_info}')
        print('==' * 50)

        # Dishonest-person (失信人) section — current records.
        if html.xpath("//*[@id='_container_dishonest']"):
            try:
                dishonest_list = self.get_dishonest(html, gszch, text, url)
            except Exception as e:
                return e.args, '失信人'
            data_list.append(['失信人', dishonest_list])
            print(len(dishonest_list))
            print(f'失信人:  {dishonest_list}')
            print('==' * 50)
        # Historical dishonest-person records. Checked independently (not as an
        # else-branch), so one page can contribute both sections.
        if html.xpath("//*[@id='_container_pastDishonest']/table/tbody/tr"):
            try:
                dishonest_list = self.get_past_dishonest(html, gszch, text, url)
            except Exception as e:
                return e.args, '历史失信人'
            data_list.append(['失信人', dishonest_list])
            print(len(dishonest_list))
            print(f'历史失信人:  {dishonest_list}')
            print('==' * 50)

        mysql.update_message('基本信息', basic_info)  # upsert the company's basic info
        for data in data_list:
            mysql.delete_other(data[0], gszch)  # clear stale rows for this section
            a, b = mysql.insert_other(data[0], data[1])
            # Guard added: the original did `'存入数据库有误' in a` unguarded and
            # raised TypeError when insert_other returned a non-string status.
            if a and '存入数据库有误' in a:
                return a, b
        ReadMySQL().upload_data(tyxydm)  # mark the source row as uploaded
        time.sleep(2)
        return None, None

    def get_basic_information(self, html, SYSID):
        '''基本信息'''
        basic_info = []
        try:
            net = html.xpath("//*[@id='company_web_top']/div[2]/div[3]/div[3]/div[2]/div[1]/a/@href")[0]
        except IndexError:
            net = '暂无信息'
        company_name = html.xpath("//h1[@class='name']/text()")[0]  # 公司名称
        try:
            intro=html.xpath("//*[@id='company_web_top']/div[2]/div[3]/div[3]/div[3]/div/div/text()")[0]
        except IndexError:
            intro='暂无信息'
        try:
            tel = html.xpath("//span[@class='hidden']/text()")[0]  # 电话
            tel = re.sub(r'[\[\]"]', '', str(tel))
        except IndexError:
            tel = '暂无信息'
        email = html.xpath("//*[@id='company_web_top']/div[2]/div[3]/div[3]/div[1]/div[2]/span[2]/text()")[0]  # 邮箱
        address = html.xpath("//*[@id='company_web_top']/div[2]/div[3]/div[3]/div[2]/div[2]/div/div/text()")[0]  # 公司地址
        gszch = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[3]/td[4]/text()")[
            0]  # 工商注册号
        zzjgdm = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[4]/td[4]/text()")[
            0]  # 组织机构代码
        tyxydm = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[3]/td[2]/text()")[
            0]  # 统一信用代码
        gslx = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[5]/td[2]/text()")[
            0]  # 公司类型
        nsrsbh = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[4]/td[2]/text()")[
            0]  # 纳税人识别号
        hy = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[5]/td[4]/text()")[0]  # 行业
        yyqx = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[7]/td[2]//text()")[
            0]  # 营业期限
        djjg = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[6]/td[4]//text()")[
            0]  # 登记机关
        zcdz = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[10]/td[2]//text()")[
            0]  # 注册地址
        ywmc = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[9]/td[4]//text()")[
            0]  # 英文名称
        jyfw = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[11]/td[2]//text()")[
            0]  # 经营范围
        zczb = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[1]/td[2]//text()")[
            0]  # 注册资本
        zcsj = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[2]/td[2]//text()")[
            0]  # 注册时间
        hzsj = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[6]/td[2]//text()")[
            0]  # 核准时间
        qyzt = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[2]/td[4]//text()")[
            0]  # 企业状态
        nszz = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[7]/td[4]//text()")[
            0]  # 纳税资质
        rygm = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[8]/td[2]//text()")[
            0]  # 人员规模
        sjzb = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[1]/td[4]//text()")[
            0]  # 实缴资本
        cbrs = html.xpath("//table[@class='table -striped-col -border-top-none -breakall']//tr[8]/td[4]//text()")[
            0]  # 参保人数
        if zczb == '-':
            zczb = ''
        fr = html.xpath("//div[@class='name']//text()")[0]  # 法人
        basic_info.append(SYSID)
        basic_info.append(tel)
        basic_info.append(email)
        basic_info.append(net)
        basic_info.append(address)
        basic_info.append(intro)
        basic_info.append(gszch)
        basic_info.append(zzjgdm)
        basic_info.append(tyxydm)
        basic_info.append(gslx)
        basic_info.append(nsrsbh)
        basic_info.append(hy)
        basic_info.append(yyqx)
        basic_info.append(djjg)
        basic_info.append(zcdz)
        basic_info.append(ywmc)
        basic_info.append(jyfw)
        basic_info.append(company_name)
        basic_info.append(fr)
        basic_info.append(zczb)
        basic_info.append(zcsj)
        basic_info.append(hzsj)
        basic_info.append(qyzt)
        basic_info.append(nszz)
        basic_info.append(rygm)
        basic_info.append(sjzb)
        basic_info.append(cbrs)
        return basic_info, gszch ,tyxydm,company_name

    def get_key_person(self, html, gszch):
        """主要人员 — key personnel rows as ``[gszch, title, name, note]``."""
        rows = []
        for person in html.xpath("//*[@id='_container_staff']/div/table/tbody/tr"):
            # Title list is stringified then stripped of brackets/quotes/spaces.
            title = re.sub(r'[\]\[\'\s+，]', '', str(person.xpath('./td/span//text()')))
            name = person.xpath(".//a[@class='link-click']/text()")[0]
            # VIP note, cleaned the same way.
            note = re.sub(r'[\[\],\'\s+]', '', str(person.xpath(".//a[@class=' link-vip tips-toco']//text()")))
            rows.append([gszch, title, name, note])
        return rows

    def get_shareholder_info(self, html, gszch):
        """股东信息 — current plus historical shareholder rows.

        Each row is ``[gszch, shareholder, ratio, subscribed_capital, note]``.
        The two original loops were near-duplicates differing only in the
        VIP-note anchor class (historical rows use a leading space in the
        class attribute); they are deduplicated into one helper that keeps
        both selectors byte-identical.
        """
        def parse_rows(trs, vip_class):
            # Parse one table's rows; vip_class selects the note anchor.
            rows = []
            for tr in trs:
                try:
                    gd = tr.xpath(".//a[@class='link-click']/text()")[0]  # shareholder
                except IndexError:
                    # Non-linked shareholders render as a plain span.
                    gd = tr.xpath(".//span/text()")[1]
                czbl = tr.xpath("./td[3]//text()")[0]  # shareholding ratio
                rjcz = tr.xpath("./td[4]//text()")[0]  # subscribed capital
                if tr.xpath(f".//a[@class='{vip_class}']"):
                    bz = tr.xpath(f".//a[@class='{vip_class}']//text()")  # note
                    bz = re.sub(r'[\[\],\'\s+]', '', str(bz))
                else:
                    bz = ''
                rows.append([gszch, gd, czbl, rjcz, bz])
            return rows

        shareholder_info_list = parse_rows(
            html.xpath("//div[@id='_container_holder']/table/tbody/tr"),
            'link-vip tips-toco')
        if html.xpath("//div[@id='_container_pastHolderCount']"):
            shareholder_info_list += parse_rows(
                html.xpath("//div[@id='_container_pastHolderCount']/table/tbody/tr"),
                ' link-vip tips-toco')
        return shareholder_info_list

    def past_get_shareholder_info(self, html, gszch):
        '''Historical shareholder info (历史股东信息).

        Returns a list of rows: [gszch, holder, share ratio, subscribed
        capital, remark].
        '''
        trs = html.xpath("//div[@id='_container_pastHolderCount']/table/tbody/tr")
        shareholder_info_list = []
        for tr in trs:
            try:
                gd = tr.xpath(".//a[@class='link-click']/text()")[0]  # shareholder name (linked)
            except IndexError:
                gd = tr.xpath(".//span/text()")[1]  # unlinked shareholder falls back to a span
            czbl = tr.xpath("./td[3]//text()")[0]  # share ratio
            rjcz = tr.xpath("./td[4]//text()")[0]  # subscribed capital
            if tr.xpath(".//a[@class=' link-vip tips-toco']"):
                # Fix: the extraction previously used a class without the
                # leading space, never matching the anchor the condition
                # just found; both now use the same selector.
                bz = tr.xpath(".//a[@class=' link-vip tips-toco']//text()")  # remark
                bz = re.sub(r'[\[\],\'\s+]', '', str(bz))
            else:
                bz = ''
            shareholder_info_list.append([gszch, gd, czbl, rjcz, bz])
        # Fix: the return was inside the loop body, so only the first row
        # was ever returned; it now runs after all rows are collected.
        return shareholder_info_list

    def get_stockChangeInfo(self,html,gszch):
        '''Equity change table (股东变更).

        Returns rows of [gszch, seq no, shareholder, equity before, equity
        after, change date, publication date].
        '''
        stockChangeInfo_list = []
        rows = html.xpath("//div[@id='_container_stockChangeInfo']/table/tbody/tr")
        for row in rows:
            seq_no = row.xpath("./td[1]/text()")[0]        # sequence number
            holder = row.xpath("./td[2]//a[@class='link-click']/text()")[0]  # shareholder
            before = row.xpath("./td[3]/text()")[0]        # equity before change
            after = row.xpath("./td[4]/text()")[0]         # equity after change
            changed_on = row.xpath("./td[5]/text()")[0]    # change date
            published_on = row.xpath("./td[6]/text()")[0]  # publication date
            stockChangeInfo_list.append(
                [gszch, seq_no, holder, before, after, changed_on, published_on])
        return stockChangeInfo_list


    def get_change_record(self, html, gszch,SYSID,url):
        '''Change records (变更记录), following pagination when present.'''
        first_page_rows = html.xpath("//*[@id='_container_changeinfo']/div/table/tbody/tr")
        records = self.parse_change_record(gszch, first_page_rows, [])
        if not html.xpath("//div[@id='_container_changeinfo']//ul[@class='pagination']"):
            return records
        last_page = html.xpath("//*[@id='_container_changeinfo']//ul[@class='pagination']/li[last()-1]//text()")[0]
        last_page = re.sub('[\.]', '', str(last_page))  # strip dots from the page label
        return self.get_more_change_record(last_page, SYSID, url, gszch, records)

    def get_more_change_record(self,pn_all,SYSID,url,gszch,change_record_list):
        '''Fetch change-record pages 2..pn_all and append their rows.'''
        # Request headers are loop-invariant; build them once.
        headers = {
            'Accept': '*/*',
            'Connection': 'keep-alive',
            'Host': 'www.tianyancha.com',
            'Referer': f'{url}',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        for page in range(2, int(pn_all) + 1):
            ts = int(round(time.time() * 1000))  # cache-busting timestamp
            page_url = f'https://www.tianyancha.com/pagination/changeinfo.xhtml?ps=10&pn={page}&id={SYSID}&_={ts}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            page_rows = page_html.xpath("//table[@class='table']/tbody/tr")
            change_record_list = self.parse_change_record(gszch, page_rows, change_record_list)
        return change_record_list

    def parse_change_record(self,gszch,trs,change_record_list):
        """Parse change-record table rows into change_record_list.

        Each appended row is [gszch, change date, changed item, value
        before, value after].
        """
        for row in trs:
            changed_at = row.xpath("./td[2]/text()")[0]  # change date
            item = row.xpath("./td[3]//text()")[0]       # changed item
            # td[4]/td[5] yield text fragments; collapse each list into one string.
            before = re.sub(r'[\[\]\',\s+]', '', str(row.xpath("./td[4]//text()")))
            after = re.sub(r'[\[\]\',\s+]', '', str(row.xpath("./td[5]//text()")))
            change_record_list.append([gszch, changed_at, item, before, after])
        return change_record_list

    def get_branch(self, html, gszch,SYSID,url):
        '''Branch offices (分支机构), following pagination when present.'''
        first_page_rows = html.xpath("//*[@id='_container_branch']/table/tbody/tr")
        branches = self.parse_branch(gszch, first_page_rows, [])
        if not html.xpath("//div[@id='_container_branch']//ul[@class='pagination']"):
            return branches
        last_page = html.xpath("//div[@id='_container_branch']//ul[@class='pagination']/li[last()-1]//text()")[0]
        last_page = re.sub('[\.]', '', str(last_page))  # strip dots from the page label
        return self.get_more_branch(last_page, SYSID, url, gszch, branches)

    def get_more_branch(self,pn_all,SYSID,url,gszch,get_branch_list):
        '''Fetch branch-office pages 2..pn_all and append their rows.'''
        # Request headers are loop-invariant; build them once.
        headers = {
            'Accept': '*/*',
            'Connection': 'keep-alive',
            'Host': 'www.tianyancha.com',
            'Referer': f'{url}',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        for page in range(2, int(pn_all) + 1):
            ts = int(round(time.time() * 1000))  # cache-busting timestamp
            page_url = f'https://www.tianyancha.com/pagination/branch.xhtml?ps=10&pn={page}&id={SYSID}&_={ts}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            page_rows = page_html.xpath("//table[@class='table']/tbody/tr")
            get_branch_list = self.parse_branch(gszch, page_rows, get_branch_list)
        return get_branch_list

    def parse_branch(self,gszch,trs,get_branch_list):
        '''Parse branch-office table rows into get_branch_list.

        Each appended row is [gszch, company name, legal representative,
        status, registration date].
        '''
        for row in trs:
            company = row.xpath("./td[2]//a[@class='link-click']/text()")[0]  # company name
            reps = row.xpath("./td[3]//a[@class='link-click']/text()")
            # Some rows have no linked representative; fall back to '-'.
            legal_rep = reps[0] if reps else '-'
            status = row.xpath("./td[5]//text()")[0]         # status
            registered_on = row.xpath("./td[4]//text()")[0]  # registration date
            get_branch_list.append([gszch, company, legal_rep, status, registered_on])
        return get_branch_list


    def get_foreign_investment(self, html, gszch,SYSID,url):
        '''Outbound investments (对外投资): first page, extra pages when
        paginated, and the historical table when present.'''
        first_page_rows = html.xpath("//*[@id='_container_invest']/div/table/tbody/tr")
        investments = self.parse_foreign_investment(gszch, first_page_rows, [])
        if html.xpath("//div[@id='_container_invest']//ul[@class='pagination']"):
            last_page = html.xpath("//div[@id='_container_invest']//ul[@class='pagination']/li[last()-1]//text()")[0]
            last_page = re.sub('[\.]', '', str(last_page))  # strip dots from the page label
            investments = self.get_more_foreign_investment(last_page, SYSID, url, gszch, investments)
        if html.xpath("//div[@id='_container_pastInverstCount']"):
            # Historical investments live in a separate container.
            past_rows = html.xpath("//*[@id='_container_pastInverstCount']/table/tbody/tr")
            investments = self.parse_foreign_investment(gszch, past_rows, investments)
        return investments

    def past_get_foreign_investment(self, html, gszch):
        '''Historical outbound investments (历史对外投资).'''
        rows = html.xpath("//*[@id='_container_pastInverstCount']/table/tbody/tr")
        return self.parse_foreign_investment(gszch, rows, [])

    def get_more_foreign_investment(self,pn_all,SYSID,url,gszch,foreign_investment_list):
        '''Fetch outbound-investment pages 2..pn_all and append their rows.'''
        # Request headers are loop-invariant; build them once.
        headers = {
            'Accept': '*/*',
            'Connection': 'keep-alive',
            'Host': 'www.tianyancha.com',
            'Referer': f'{url}',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        for page in range(2, int(pn_all) + 1):
            ts = int(round(time.time() * 1000))  # cache-busting timestamp
            page_url = f'https://www.tianyancha.com/pagination/invest.xhtml?ps=20&pn={page}&id={SYSID}&_={ts}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            page_rows = page_html.xpath("//table[@class='table -breakall']/tbody/tr")
            foreign_investment_list = self.parse_foreign_investment(gszch, page_rows, foreign_investment_list)
        return foreign_investment_list

    def parse_foreign_investment(self,gszch,trs,foreign_investment_list):
        '''Parse outbound-investment table rows.

        Each appended row is [gszch, investee name, investee legal rep,
        registered capital, investment ratio, registration date, status].
        '''
        for row in trs:
            investee = row.xpath("./td[2]//a[@class='link-click']/text()")[0]  # investee name
            reps = row.xpath("./td[@class='left-col']//a[@class='link-click']/text()")
            # Some rows have no linked representative; fall back to '-'.
            legal_rep = reps[0] if reps else '-'
            capital = row.xpath("./td[5]//text()")[0]        # registered capital
            ratio = row.xpath("./td[6]//text()")[0]          # investment ratio
            registered_on = row.xpath("./td[4]//text()")[0]  # registration date
            status = row.xpath("./td[7]//text()")[0]         # status
            foreign_investment_list.append(
                [gszch, investee, legal_rep, capital, ratio, registered_on, status])
        return foreign_investment_list


    def get_court_notice(self, html, gszch,SYSID,url):
        '''Court-session announcements (开庭公告): current table plus the
        historical table, each followed through pagination when present.'''
        rows = html.xpath("//*[@id='_container_announcementcourt']/table/tbody/tr")
        notices = self.parse_court_notice(gszch, rows, [])
        if html.xpath("//div[@id='_container_announcementcourt']//ul[@class='pagination']"):
            last_page = html.xpath("//div[@id='_container_announcementcourt']//ul[@class='pagination']/li[last()-1]//text()")[0]
            last_page = re.sub('[\.]', '', str(last_page))  # strip dots from the page label
            notices = self.get_more_court_notice(last_page, SYSID, url, gszch, notices)
        if html.xpath("//div[@id='_container_pastAnnouncementCount']"):
            past_rows = html.xpath("//*[@id='_container_pastAnnouncementCount']/table/tbody/tr")
            notices = self.parse_court_notice(gszch, past_rows, notices)
            if html.xpath("//div[@id='_container_pastAnnouncementCount']//ul[@class='pagination']"):
                last_page = html.xpath("//div[@id='_container_pastAnnouncementCount']//ul[@class='pagination']/li[last()-1]//text()")[0]
                last_page = re.sub('[\.]', '', str(last_page))
                notices = self.get_more_past_court_notice(last_page, SYSID, url, gszch, notices)
        return notices

    def get_more_court_notice(self,pn_all,SYSID,url,gszch,court_notice_list):
        '''Fetch court-session announcement pages 2..pn_all and append rows.'''
        # Request headers are loop-invariant; build them once.
        headers = {
            'Accept': '*/*',
            'Connection': 'keep-alive',
            'Host': 'www.tianyancha.com',
            'Referer': f'{url}',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        for page in range(2, int(pn_all) + 1):
            ts = int(round(time.time() * 1000))  # cache-busting timestamp
            page_url = f'https://www.tianyancha.com/pagination/announcementcourt.xhtml?ps=10&pn={page}&id={SYSID}&_={ts}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            page_rows = page_html.xpath("//table[@class='table']/tbody/tr")
            court_notice_list = self.parse_court_notice(gszch, page_rows, court_notice_list)
        return court_notice_list

    def get_more_past_court_notice(self,pn_all,SYSID,url,gszch,court_notice_list):
        '''Fetch historical court-session announcement pages 2..pn_all.'''
        # Request headers are loop-invariant; build them once.
        headers = {
            'Accept': '*/*',
            'Connection': 'keep-alive',
            'Host': 'www.tianyancha.com',
            'Referer': f'{url}',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        for page in range(2, int(pn_all) + 1):
            ts = int(round(time.time() * 1000))  # cache-busting timestamp
            page_url = f'https://www.tianyancha.com/pagination/pastAnnouncementCount.xhtml?ps=10&pn={page}&id={SYSID}&_={ts}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            page_rows = page_html.xpath("//table[@class='table']/tbody/tr")
            court_notice_list = self.parse_court_notice(gszch, page_rows, court_notice_list)
        return court_notice_list

    def get_pastAnnouncementCount(self,html,gszch, SYSID, url):
        '''Historical court-session announcements (历史开庭公告).

        Returns the parsed rows from the first page plus, when the section
        is paginated, all remaining pages.
        '''
        trs = html.xpath("//*[@id='_container_pastAnnouncementCount']/table/tbody/tr")
        court_notice_list = []
        court_notice_list = self.parse_court_notice(gszch, trs, court_notice_list)
        # Fix: guard the pagination lookup — the previous unconditional [0]
        # raised IndexError whenever the section had only one page.
        if html.xpath("//div[@id='_container_pastAnnouncementCount']//ul[@class='pagination']"):
            pn_all = html.xpath("//div[@id='_container_pastAnnouncementCount']//ul[@class='pagination']/li[last()-1]//text()")[0]
            # Fix: strip dots from the page label before int() conversion,
            # matching every sibling pagination method in this class.
            pn_all = re.sub('[\.]', '', str(pn_all))
            court_notice_list = self.get_more_past_court_notice(pn_all, SYSID, url, gszch, court_notice_list)
        return court_notice_list

    def parse_court_notice(self,gszch,trs,court_notice_list):
        '''Parse court-session announcement rows.

        Each appended row is [gszch, hearing date, cause of action,
        plaintiff, defendant].
        '''
        for row in trs:
            hearing_date = row.xpath("./td[2]/text()")[0]  # hearing date
            cause = row.xpath("./td[4]//text()")[0]        # cause of action
            # Plaintiff/defendant cells yield text fragments; collapse each
            # list into one string.
            plaintiff = re.sub(r'[\[\]\',\s+]', '', str(row.xpath("./td[5]//text()")))
            defendant = re.sub(r'[\[\]\',\s+]', '', str(row.xpath("./td[6]//text()")))
            court_notice_list.append([gszch, hearing_date, cause, plaintiff, defendant])
        return court_notice_list

    def get_legal_action(self, html, gszch,company_name,SYSID,url):
        '''Lawsuits (法律诉讼): current table plus the historical table,
        each followed through pagination when present.'''
        rows = html.xpath("//*[@id='_container_lawsuit']/div/table/tbody/tr")
        lawsuits = self.parse_legal_action(gszch, rows, [])
        if html.xpath("//div[@id='_container_lawsuit']//ul[@class='pagination']"):
            last_page = html.xpath("//div[@id='_container_lawsuit']//ul[@class='pagination']/li[last()-1]//text()")[0]
            last_page = re.sub('[\.]', '', str(last_page))  # strip dots from the page label
            lawsuits = self.get_more_legal_action(last_page, company_name, url, gszch, lawsuits)
        if html.xpath("//div[@id='_container_pastLawsuitCount']"):
            past_rows = html.xpath("//*[@id='_container_pastLawsuitCount']/div/div/table/tbody/tr")
            lawsuits = self.parse_legal_action(gszch, past_rows, lawsuits)
            if html.xpath("//div[@id='_container_pastLawsuitCount']//ul[@class='pagination']"):
                last_page = html.xpath("//div[@id='_container_pastLawsuitCount']//ul[@class='pagination']/li[last()-1]//text()")[0]
                last_page = re.sub('[\.]', '', str(last_page))
                lawsuits = self.get_more_legal_action_past(last_page, SYSID, url, gszch, lawsuits)
        return lawsuits

    def get_more_legal_action(self,pn_all,company_name,url,gszch,legal_action_list):
        '''Fetch lawsuit pages 2..pn_all (keyed by company name) and append rows.'''
        company_name = parse.quote(company_name)  # URL-encode for the query string
        # Request headers are loop-invariant; build them once.
        headers = {
            'Accept': '*/*',
            'Connection': 'keep-alive',
            'Host': 'www.tianyancha.com',
            'Referer': f'{url}',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        for page in range(2, int(pn_all) + 1):
            ts = int(round(time.time() * 1000))  # cache-busting timestamp
            page_url = f'https://www.tianyancha.com/pagination/lawsuit.xhtml?ps=10&pn={page}&name={company_name}&_={ts}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            page_rows = page_html.xpath("//table[@class='table']/tbody/tr")
            legal_action_list = self.parse_legal_action(gszch, page_rows, legal_action_list)
        return legal_action_list

    def get_more_legal_action_past(self,pn_all,SYSID,url,gszch,legal_action_list):
        '''Fetch historical lawsuit pages 2..pn_all and append their rows.'''
        # Request headers are loop-invariant; build them once.
        headers = {
            'Accept': '*/*',
            'Connection': 'keep-alive',
            'Host': 'www.tianyancha.com',
            'Referer': f'{url}',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        for page in range(2, int(pn_all) + 1):
            ts = int(round(time.time() * 1000))  # cache-busting timestamp
            page_url = f'https://www.tianyancha.com/pagination/pastLawsuitCount.xhtml?ps=10&pn={page}&id={SYSID}&_={ts}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            page_rows = page_html.xpath("//table[@class='table link-warp']/tbody/tr")
            legal_action_list = self.parse_legal_action(gszch, page_rows, legal_action_list)
        return legal_action_list

    def get_pastLawsuitCount(self,html, gszch,SYSID, url):
        '''Historical lawsuits (历史法律诉讼), following pagination when present.'''
        first_page_rows = html.xpath("//*[@id='_container_pastLawsuitCount']/div/div/table/tbody/tr")
        lawsuits = self.parse_legal_action(gszch, first_page_rows, [])
        if not html.xpath("//div[@id='_container_pastLawsuitCount']//ul[@class='pagination']"):
            return lawsuits
        last_page = html.xpath("//div[@id='_container_pastLawsuitCount']//ul[@class='pagination']/li[last()-1]//text()")[0]
        last_page = re.sub('[\.]', '', str(last_page))  # strip dots from the page label
        return self.get_more_legal_action_past(last_page, SYSID, url, gszch, lawsuits)

    def parse_legal_action(self,gszch,trs,legal_action_list):
        '''Parse lawsuit table rows.

        Each appended row is [gszch, date, judgment document, case type,
        case number].
        '''
        for row in trs:
            filed_on = row.xpath("./td[2]//text()")[0]  # date
            judgment = row.xpath("./td[3]/text()")[0]   # judgment document
            case_type = row.xpath("./td[4]//text()")[0] # case type
            case_no = row.xpath("./td[6]//text()")[0]   # case number
            legal_action_list.append([gszch, filed_on, judgment, case_type, case_no])
        return legal_action_list

    def get_court_announcement(self, html, gszch,company_name,SYSID,url):
        '''Court announcements (法院公告): current table plus the historical
        table, each followed through pagination when present.'''
        rows = html.xpath("//*[@id='_container_court']/table/tbody/tr")
        announcements = self.parse_court_announcement(gszch, rows, [])
        if html.xpath("//div[@id='_container_court']//ul[@class='pagination']"):
            last_page = html.xpath("//div[@id='_container_court']//ul[@class='pagination']/li[last()-1]//text()")[0]
            last_page = re.sub('[\.]', '', str(last_page))  # strip dots from the page label
            announcements = self.get_more_court_announcement(last_page, company_name, url, gszch, announcements)
        if html.xpath("//div[@id='_container_pastCourtCount']"):
            past_rows = html.xpath("//*[@id='_container_pastCourtCount']/table/tbody/tr")
            announcements = self.parse_court_announcement(gszch, past_rows, announcements)
            if html.xpath("//div[@id='_container_pastCourtCount']//ul[@class='pagination']"):
                last_page = html.xpath("//div[@id='_container_pastCourtCount']//ul[@class='pagination']/li[last()-1]//text()")[0]
                last_page = re.sub('[\.]', '', str(last_page))
                announcements = self.get_more_court_announcement_past(last_page, SYSID, url, gszch, announcements)
        return announcements

    def get_more_court_announcement(self,pn_all,company_name,url,gszch,court_announcement_list):
        '''Fetch court-announcement pages 2..pn_all (keyed by company name).'''
        company_name = parse.quote(company_name)  # URL-encode for the query string
        # Request headers are loop-invariant; build them once.
        headers = {
            'Accept': '*/*',
            'Connection': 'keep-alive',
            'Host': 'www.tianyancha.com',
            'Referer': f'{url}',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        for page in range(2, int(pn_all) + 1):
            ts = int(round(time.time() * 1000))  # cache-busting timestamp
            page_url = f'https://www.tianyancha.com/pagination/court.xhtml?ps=10&pn={page}&name={company_name}&_={ts}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            page_rows = page_html.xpath("//table[@class='table']/tbody/tr")
            court_announcement_list = self.parse_court_announcement(gszch, page_rows, court_announcement_list)
        return court_announcement_list

    def get_more_court_announcement_past(self,pn_all,SYSID,url,gszch,court_announcement_list):
        '''Fetch historical court-announcement pages 2..pn_all.'''
        # Request headers are loop-invariant; build them once.
        headers = {
            'Accept': '*/*',
            'Connection': 'keep-alive',
            'Host': 'www.tianyancha.com',
            'Referer': f'{url}',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        for page in range(2, int(pn_all) + 1):
            ts = int(round(time.time() * 1000))  # cache-busting timestamp
            page_url = f'https://www.tianyancha.com/pagination/pastCourtCount.xhtml?ps=10&pn={page}&id={SYSID}&_={ts}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            page_rows = page_html.xpath("//table[@class='table link-warp']/tbody/tr")
            court_announcement_list = self.parse_court_announcement(gszch, page_rows, court_announcement_list)
        return court_announcement_list

    def get_pastCourtCount(self,html, gszch,SYSID, url):
        '''Historical court announcements (历史法院公告), following
        pagination when present.'''
        first_page_rows = html.xpath("//*[@id='_container_pastCourtCount']/table/tbody/tr")
        announcements = self.parse_court_announcement(gszch, first_page_rows, [])
        if not html.xpath("//div[@id='_container_pastCourtCount']//ul[@class='pagination']"):
            return announcements
        last_page = html.xpath("//div[@id='_container_pastCourtCount']//ul[@class='pagination']/li[last()-1]//text()")[0]
        last_page = re.sub('[\.]', '', str(last_page))  # strip dots from the page label
        return self.get_more_court_announcement_past(last_page, SYSID, url, gszch, announcements)

    def parse_court_announcement(self,gszch,trs,court_announcement_list):
        '''Parse court-announcement rows; fetches each row's detail page.

        Each appended row is [gszch, publish date, appellant, appellee,
        announcement type, court, detail text].
        '''
        for row in trs:
            published_on = row.xpath("./td[2]/text()")[0]  # publish date
            appellant = row.xpath("./td[3]//text()")[0]    # appellant
            appellee = row.xpath("./td[4]//text()")[0]     # appellee
            kind = row.xpath("./td[5]//text()")[0]         # announcement type
            court = row.xpath("./td[6]//text()")[0]        # court
            detail_url = row.xpath("./td[7]//a/@href")[0]
            # One extra HTTP request per row to pull the announcement body.
            detail = self.get_detail_court_announcement(detail_url)
            court_announcement_list.append(
                [gszch, published_on, appellant, appellee, kind, court, detail])
        return court_announcement_list

    def get_detail_court_announcement(self, xq_href):
        '''Fetch one court-announcement detail page and extract its body
        text with a regex; returns '-' when the pattern does not match.'''
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
            'Referer': f'{xq_href}',
            'Host': 'www.tianyancha.com',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'}
        time.sleep(2)  # throttle to avoid the anti-robot page
        response = requests.get(xq_href, headers=headers, cookies=self.cookies, proxies=self.proxy)
        matches = re.findall('<td class="table-left">法院公告内容</td><td>(.*?)</td></tr></table></div>', response.text)
        return matches[0] if matches else '-'

    def get_execute_person(self, html, gszch,SYSID,url):
        '''Judgment debtors (被执行人): current table plus the historical
        table, each followed through pagination when present.'''
        rows = html.xpath("//*[@id='_container_zhixing']/table/tbody/tr")
        debtors = self.parse_execute_person(gszch, rows, [])
        if html.xpath("//div[@id='_container_zhixing']//ul[@class='pagination']"):
            last_page = html.xpath("//div[@id='_container_zhixing']//ul[@class='pagination']/li[last()-1]//text()")[0]
            last_page = re.sub('[\.]', '', str(last_page))  # strip dots from the page label
            debtors = self.get_more_execute_person(last_page, SYSID, url, gszch, debtors)
        if html.xpath("//div[@id='_container_pastZhixing']"):
            past_rows = html.xpath("//*[@id='_container_pastZhixing']/table/tbody/tr")
            debtors = self.parse_execute_person(gszch, past_rows, debtors)
            if html.xpath("//div[@id='_container_pastZhixing']//ul[@class='pagination']"):
                last_page = html.xpath("//div[@id='_container_pastZhixing']//ul[@class='pagination']/li[last()-1]//text()")[0]
                last_page = re.sub('[\.]', '', str(last_page))
                debtors = self.get_more_execute_person_past(last_page, SYSID, url, gszch, debtors)
        return debtors

    def get_more_execute_person(self,pn_all,SYSID,url,gszch,execute_person_list):
        '''Fetch judgment-debtor pages 2..pn_all and append their rows.'''
        # Request headers are loop-invariant; build them once.
        headers = {
            'Accept': '*/*',
            'Connection': 'keep-alive',
            'Host': 'www.tianyancha.com',
            'Referer': f'{url}',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        for page in range(2, int(pn_all) + 1):
            ts = int(round(time.time() * 1000))  # cache-busting timestamp
            page_url = f'https://www.tianyancha.com/pagination/zhixing.xhtml?ps=10&pn={page}&id={SYSID}&_={ts}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            page_rows = page_html.xpath("//table[@class='table']/tbody/tr")
            execute_person_list = self.parse_execute_person(gszch, page_rows, execute_person_list)
        return execute_person_list

    def get_more_execute_person_past(self,pn_all,SYSID,url,gszch,execute_person_list):
        '''Fetch historical judgment-debtor pages 2..pn_all and append rows.'''
        # Request headers are loop-invariant; build them once.
        headers = {
            'Accept': '*/*',
            'Connection': 'keep-alive',
            'Host': 'www.tianyancha.com',
            'Referer': f'{url}',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        for page in range(2, int(pn_all) + 1):
            ts = int(round(time.time() * 1000))  # cache-busting timestamp
            page_url = f'https://www.tianyancha.com/pagination/pastZhixing.xhtml?ps=10&pn={page}&id={SYSID}&_={ts}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            page_rows = page_html.xpath("//table[@class='table']/tbody/tr")
            execute_person_list = self.parse_execute_person(gszch, page_rows, execute_person_list)
        return execute_person_list

    def get_past_execute_person(self,html,gszch, SYSID, url):
        '''Historical judgment debtors (历史被执行人), following pagination
        when present.'''
        first_page_rows = html.xpath("//*[@id='_container_pastZhixing']/table/tbody/tr")
        debtors = self.parse_execute_person(gszch, first_page_rows, [])
        if not html.xpath("//div[@id='_container_pastZhixing']//ul[@class='pagination']"):
            return debtors
        last_page = html.xpath("//div[@id='_container_pastZhixing']//ul[@class='pagination']/li[last()-1]//text()")[0]
        last_page = re.sub('[\.]', '', str(last_page))  # strip dots from the page label
        return self.get_more_execute_person_past(last_page, SYSID, url, gszch, debtors)

    def parse_execute_person(self,gszch,trs,execute_person_list):
        '''Parse judgment-debtor table rows.

        Each appended row is [gszch, filing date, enforcement target, case
        number, enforcement court].
        '''
        for row in trs:
            filed_on = row.xpath("./td[2]/text()")[0]  # filing date
            target = row.xpath("./td[3]/text()")[0]    # enforcement target
            case_no = row.xpath("./td[4]/text()")[0]   # case number
            court = row.xpath("./td[5]/text()")[0]     # enforcement court
            execute_person_list.append([gszch, filed_on, target, case_no, court])
        return execute_person_list


    def get_abnormal_operation(self, html, gszch):
        '''Abnormal-operation records, removed entries (经营异常).

        Each row is [gszch, listed date, listing reason, authority, removal
        date, removal reason, authority].
        '''
        abnormal_operation_list = []
        rows = html.xpath("//*[@id='_container_abnormalRemove']/table/tbody/tr")
        for row in rows:
            listed_on = row.xpath("./td[2]/text()")[0]      # date listed
            list_reason = row.xpath("./td[3]/text()")[0]    # listing reason
            authority = row.xpath("./td[4]/text()")[0]      # deciding/removing authority
            removed_on = row.xpath("./td[5]/text()")[0]     # removal date
            remove_reason = row.xpath("./td[6]/text()")[0]  # removal reason
            # NOTE(review): the authority appears twice on purpose-looking
            # grounds — get_abnormal_operation_put also builds 7-column rows
            # (padding with '-') — but confirm against the DB schema.
            abnormal_operation_list.append(
                [gszch, listed_on, list_reason, authority, removed_on, remove_reason, authority])
        return abnormal_operation_list

    def get_abnormal_operation_put(self, html, gszch):
        '''Abnormal-operation records, currently-listed entries (经营异常 列入).

        Rows are padded with '-' to the same 7-column shape as
        get_abnormal_operation (removal fields are not applicable here).
        '''
        container_abnormalPut_list = []
        rows = html.xpath("//*[@id='_container_abnormalPut']/table/tbody/tr")
        for row in rows:
            listed_on = row.xpath("./td[2]/text()")[0]    # date listed
            list_reason = row.xpath("./td[4]/text()")[0]  # listing reason
            authority = row.xpath("./td[3]/text()")[0]    # deciding authority
            container_abnormalPut_list.append(
                [gszch, listed_on, list_reason, authority, '-', '-', '-'])
        return container_abnormalPut_list

    def get_administrative_penalty(self, html, gszch, company_name, SYSID, url, text):
        '''行政处罚 (administrative penalties).

        Collects penalty rows from up to three containers on the detail
        page (punish, punishmentCreditchina, pastPunishmentIC), following
        pagination in each.  Returns a list of 6-item rows.

        Fix: the `'[\\.]'` regex literals used invalid escape sequences in
        non-raw strings (SyntaxWarning on Python 3.12+); now raw strings.
        '''
        administrative_penalty_list = []
        trs = html.xpath("//*[@id='_container_punish']/table/tbody/tr")
        administrative_penalty_list = self.parse_administrative_penalty(gszch, trs, administrative_penalty_list, text)
        if html.xpath("//div[@id='_container_punish']//ul[@class='pagination']"):
            pn_all = html.xpath("//div[@id='_container_punish']//ul[@class='pagination']/li[last()-1]//text()")[0]
            # the last-but-one pagination label may read like "...5"; keep digits only
            pn_all = re.sub(r'[\.]', '', str(pn_all))
            administrative_penalty_list = self.get_more_administrative_penalty(pn_all, company_name, url, gszch, administrative_penalty_list)

        if html.xpath("//div[@id='_container_punishmentCreditchina']"):
            trs = html.xpath("//*[@id='_container_punishmentCreditchina']/table/tbody/tr")
            administrative_penalty_list = self.parse_administrative_penalty(gszch, trs, administrative_penalty_list, text)
            if html.xpath("//div[@id='_container_punishmentCreditchina']//ul[@class='pagination']"):
                pn_all = html.xpath("//div[@id='_container_punishmentCreditchina']//ul[@class='pagination']/li[last()-1]//text()")[0]
                pn_all = re.sub(r'[\.]', '', str(pn_all))
                for pn in range(2, int(pn_all) + 1):
                    url_ = f'https://www.tianyancha.com/pagination/punishmentCreditchina.xhtml?ps=10&pn={pn}&id={SYSID}&_={int(round(time.time() * 1000))}'
                    headers = {'Accept': '*/*',
                               'Connection': 'keep-alive',
                               'Host': 'www.tianyancha.com',
                               'Referer': f'{url}',
                               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
                    response = self.request_max(url_, headers)
                    html_ = etree.HTML(response.text)
                    trs = html_.xpath("//table[@class='table -breakall']/tbody/tr")
                    administrative_penalty_list = self.parse_administrative_penalty(gszch, trs, administrative_penalty_list, text)

        if html.xpath("//div[@id='_container_pastPunishmentIC']"):
            trs = html.xpath("//*[@id='_container_pastPunishmentIC']/table/tbody/tr")
            administrative_penalty_list = self.parse_administrative_penalty_past(gszch, trs, administrative_penalty_list)
            if html.xpath("//div[@id='_container_pastPunishmentIC']//ul[@class='pagination']"):
                pn_all = html.xpath("//div[@id='_container_pastPunishmentIC']//ul[@class='pagination']/li[last()-1]//text()")[0]
                pn_all = re.sub(r'[\.]', '', str(pn_all))
                administrative_penalty_list = self.get_more_administrative_penalty_past(pn_all, SYSID, url, gszch, administrative_penalty_list)
        return administrative_penalty_list

    def get_more_administrative_penalty(self, pn_all, company_name, url, gszch, administrative_penalty_list):
        '''获取多页行政处罚 — fetch penalty pages 2..pn_all and append their rows.'''
        quoted_name = parse.quote(company_name)
        headers = {'Accept': '*/*',
                   'Connection': 'keep-alive',
                   'Host': 'www.tianyancha.com',
                   'Referer': f'{url}',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
        for page in range(2, int(pn_all) + 1):
            page_url = f'https://www.tianyancha.com/pagination/punish.xhtml?ps=10&pn={page}&name={quoted_name}&_={int(round(time.time() * 1000))}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            rows = page_html.xpath("//table[@class='table']/tbody/tr")
            administrative_penalty_list = self.parse_administrative_penalty_past(gszch, rows, administrative_penalty_list)
        return administrative_penalty_list

    def get_more_administrative_penalty_past(self, pn_all, SYSID, url, gszch, administrative_penalty_list):
        '''获取多页历史行政处罚 — fetch historic-penalty pages 2..pn_all.

        NOTE(review): original author was unsure whether the endpoint
        expects `name` or `id`; `id` (SYSID) is used here — confirm.
        '''
        headers = {'Accept': '*/*',
                   'Connection': 'keep-alive',
                   'Host': 'www.tianyancha.com',
                   'Referer': f'{url}',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
        for page in range(2, int(pn_all) + 1):
            page_url = f'https://www.tianyancha.com/pagination/pastPunishmentIC.xhtml?ps=10&pn={page}&id={SYSID}&_={int(round(time.time() * 1000))}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            rows = page_html.xpath("//table[@class='table']/tbody/tr")
            administrative_penalty_list = self.parse_administrative_penalty_past(gszch, rows, administrative_penalty_list)
        return administrative_penalty_list

    def get_administrative_penalty_past(self, html, gszch, SYSID, url):
        '''历史行政处罚 (historical administrative penalties).

        Parses the pastPunishmentIC container and follows its pagination.
        Fix: `'[\\.]'` was an invalid escape in a non-raw string
        (SyntaxWarning on Python 3.12+); now a raw string.
        '''
        administrative_penalty_list = []
        trs = html.xpath("//*[@id='_container_pastPunishmentIC']/table/tbody/tr")
        administrative_penalty_list = self.parse_administrative_penalty_past(gszch, trs, administrative_penalty_list)
        if html.xpath("//div[@id='_container_pastPunishmentIC']//ul[@class='pagination']"):
            pn_all = html.xpath("//div[@id='_container_pastPunishmentIC']//ul[@class='pagination']/li[last()-1]//text()")[0]
            # pagination label may contain leading dots ("...5"); keep digits only
            pn_all = re.sub(r'[\.]', '', str(pn_all))
            administrative_penalty_list = self.get_more_administrative_penalty_past(pn_all, SYSID, url, gszch, administrative_penalty_list)
        return administrative_penalty_list

    def parse_administrative_penalty(self, gszch, trs, administrative_penalty_list, text):
        '''解析行政处罚 — parse penalty rows; the penalty *type* is scraped
        out of inline JSON in the raw page `text` by row position, falling
        back to '-' when no match is found.'''
        for position, row in enumerate(trs, start=1):
            decide_date = row.xpath("./td[2]/text()")[0]  # 决定日期
            doc_number = row.xpath("./td[3]/text()")[0]   # 决定书文号
            organ = row.xpath("./td[5]/text()")[0]        # 决定机关
            content = row.xpath("./td[4]/text()")[0]      # 处罚内容
            try:
                penalty_type = re.findall(
                    '<td >%d</td><td>\d{4}-\d{2}-\d{2}</td><td class="left-col">.*?","decisionDate":"\d{4}-\d{2}-\d{2}",.*?"type":"(.*?)",.*?</script><span class="link-click" ' % (
                        position), text)[0]
                penalty_type = re.sub(r'[\s+]', '', str(penalty_type))
            except IndexError:
                penalty_type = '-'
            administrative_penalty_list.append(
                [gszch, decide_date, doc_number, organ, content, penalty_type])
        return administrative_penalty_list

    def parse_administrative_penalty_past(self, gszch, trs, administrative_penalty_list):
        '''解析历史行政处罚 — like parse_administrative_penalty but the
        penalty type is unavailable, so it is always '-'.'''
        for row in trs:
            decide_date = row.xpath("./td[2]/text()")[0]  # 决定日期
            doc_number = row.xpath("./td[3]/text()")[0]   # 决定书文号
            organ = row.xpath("./td[5]/text()")[0]        # 决定机关
            content = row.xpath("./td[4]/text()")[0]      # 处罚内容
            administrative_penalty_list.append(
                [gszch, decide_date, doc_number, organ, content, '-'])
        return administrative_penalty_list

    def get_serious_illegal(self, html, gszch):
        '''严重违法 (serious-illegal listings) — one 4-item row per table row.'''
        records = []
        for row in html.xpath("//*[@id='_container_illegalPut']/table/tbody/tr"):
            put_date = row.xpath("./td[2]/text()")[0]    # 列入日期
            put_reason = row.xpath("./td[4]/text()")[0]  # 列入原因
            organ = row.xpath("./td[3]/text()")[0]       # 决定机关
            records.append([gszch, put_date, put_reason, organ])
        return records

    def get_equity_pledge(self, html, gszch, company_name, SYSID, url):
        '''股权出质 (equity pledges), current plus historical container.

        Parses the equity container and follows its pagination, then does
        the same for pastEquityCount when present.
        Fix: `'[\\.]'` regex literals used invalid escape sequences in
        non-raw strings (SyntaxWarning on Python 3.12+); now raw strings.
        '''
        equity_pledge_list = []
        trs = html.xpath("//*[@id='_container_equity']/table/tbody/tr")
        equity_pledge_list = self.parse_equity_pledge(gszch, trs, equity_pledge_list)
        if html.xpath("//div[@id='_container_equity']//ul[@class='pagination']"):
            pn_all = html.xpath("//div[@id='_container_equity']//ul[@class='pagination']/li[last()-1]//text()")[0]
            # pagination label may contain leading dots ("...5"); keep digits only
            pn_all = re.sub(r'[\.]', '', str(pn_all))
            equity_pledge_list = self.get_more_equity_pledge(pn_all, company_name, url, gszch, equity_pledge_list)
        if html.xpath("//div[@id='_container_pastEquityCount']"):
            trs = html.xpath("//*[@id='_container_pastEquityCount']/table/tbody/tr")
            equity_pledge_list = self.parse_equity_pledge(gszch, trs, equity_pledge_list)
            if html.xpath("//div[@id='_container_pastEquityCount']//ul[@class='pagination']"):
                pn_all = html.xpath("//div[@id='_container_pastEquityCount']//ul[@class='pagination']/li[last()-1]//text()")[0]
                pn_all = re.sub(r'[\.]', '', str(pn_all))
                equity_pledge_list = self.get_more_equity_pledge_past(pn_all, SYSID, url, gszch, equity_pledge_list)
        return equity_pledge_list

    def get_more_equity_pledge(self, pn_all, company_name, url, gszch, equity_pledge_list):
        '''获取更多股权出质 — fetch equity-pledge pages 2..pn_all and parse them.'''
        quoted_name = parse.quote(company_name)
        headers = {'Accept': '*/*',
                   'Connection': 'keep-alive',
                   'Host': 'www.tianyancha.com',
                   'Referer': f'{url}',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
        for page in range(2, int(pn_all) + 1):
            page_url = f'https://www.tianyancha.com/pagination/equity.xhtml?ps=10&pn={page}&name={quoted_name}&_={int(round(time.time() * 1000))}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            rows = page_html.xpath("//table[@class='table -sort']/tbody/tr")
            equity_pledge_list = self.parse_equity_pledge(gszch, rows, equity_pledge_list)
        return equity_pledge_list

    def get_more_equity_pledge_past(self, pn_all, SYSID, url, gszch, equity_pledge_list):
        '''获取更多历史股权出质 — fetch historic equity-pledge pages 2..pn_all.'''
        headers = {'Accept': '*/*',
                   'Connection': 'keep-alive',
                   'Host': 'www.tianyancha.com',
                   'Referer': f'{url}',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
        for page in range(2, int(pn_all) + 1):
            page_url = f'https://www.tianyancha.com/pagination/pastEquityCount.xhtml?ps=10&pn={page}&id={SYSID}&_={int(round(time.time() * 1000))}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            rows = page_html.xpath("//table[@class='table link-warp']/tbody/tr")
            equity_pledge_list = self.parse_equity_pledge(gszch, rows, equity_pledge_list)
        return equity_pledge_list

    def get_equity_pledge_past(self, html, gszch, SYSID, url):
        '''历史股权出质 (historical equity pledges) with pagination.

        Fix: `'[\\.]'` was an invalid escape in a non-raw string
        (SyntaxWarning on Python 3.12+); now a raw string.
        '''
        equity_pledge_list = []
        trs = html.xpath("//*[@id='_container_pastEquityCount']/table/tbody/tr")
        equity_pledge_list = self.parse_equity_pledge(gszch, trs, equity_pledge_list)
        if html.xpath("//div[@id='_container_pastEquityCount']//ul[@class='pagination']"):
            pn_all = html.xpath("//div[@id='_container_pastEquityCount']//ul[@class='pagination']/li[last()-1]//text()")[0]
            # pagination label may contain leading dots ("...5"); keep digits only
            pn_all = re.sub(r'[\.]', '', str(pn_all))
            equity_pledge_list = self.get_more_equity_pledge_past(pn_all, SYSID, url, gszch, equity_pledge_list)
        return equity_pledge_list

    def parse_equity_pledge(self, gszch, trs, equity_pledge_list):
        '''解析股权出质 — parse pledge rows; the pledgor cell may hold
        several text fragments, which are concatenated.'''
        for row in trs:
            announce_date = row.xpath("./td[2]/text()")[0]        # 公告时间
            register_code = row.xpath("./td[3]/text()")[0]        # 登记编码
            pledgor = ''.join(row.xpath("./td[4]//text()"))       # 出质人
            pledgee = row.xpath("./td[5]//text()")[0]             # 质权人
            status = row.xpath("./td[6]//text()")[0]              # 状态
            equity_pledge_list.append(
                [gszch, announce_date, register_code, pledgor, pledgee, status])
        return equity_pledge_list

    def get_chattel_mortgage(self, html, gszch, text, url):
        '''动产抵押 (chattel mortgages).

        Parses the mortgage table; the status column (td 7) is optional
        and defaults to '-'.  Each row additionally carries the detail
        text fetched from the mortgage-detail API.
        '''
        records = []
        for row in html.xpath("//*[@id='_container_mortgage']/table/tbody/tr"):
            register_date = row.xpath("./td[2]/text()")[0]  # 登记日期
            register_no = row.xpath("./td[3]/text()")[0]    # 登记号
            debt_type = row.xpath("./td[4]/text()")[0]      # 被担保债权类型
            debt_amount = row.xpath("./td[5]/text()")[0]    # 被担保债权数额
            organ = row.xpath("./td[6]/text()")[0]          # 登记机关
            try:
                status = row.xpath("./td[7]/text()")[0]     # 状态
            except IndexError:
                status = '-'
            detail = self.get_chattel_mortgage_details(text, url)
            records.append([gszch, register_date, register_no, debt_type,
                            debt_amount, organ, status, detail])
        return records

    def get_chattel_mortgage_details(self, text, url):
        '''动产抵押的详情信息 — fetch the mortgage-detail scope text.

        Extracts the first businessId embedded in the page `text` and
        queries the capi detail endpoint for its `scope` field.
        NOTE(review): findall(...)[0] always takes the FIRST businessId on
        the page, so every table row gets the same detail — confirm intent.
        HACK: X-AUTH-TOKEN is a hardcoded, expiring credential; it should
        be injected via configuration.
        Raises IndexError when no businessId is present in `text`.
        '''
        businessId = re.findall(r"onclick='openMortgageInfoDetail\(\"(.*?)\"\)'>详情</span>", text)[0]
        _url = f'https://capi.tianyancha.com/cloud-operating-risk/operating/chattelMortgage/getMortgageDetail?businessId={businessId}&_={int(round(time.time() * 1000))}'
        headers = {'Origin': 'https://www.tianyancha.com',
                   'Referer': url,
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
                   'version': 'TYC-Web',
                   'X-AUTH-TOKEN': 'eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxODk4MTkzNjA0NyIsImlhdCI6MTU3MjgzODg4MCwiZXhwIjoxNjA0Mzc0ODgwfQ.O09cZ8pZLTSmNyFc2bXoOHNWhxYLs8ZMRF8NpSVJiGpHL6KoTWekfmYCD5969LXYgI4cwMJODAGjL8nR6rZUng',
                   }
        response = requests.get(_url, headers=headers, cookies=self.cookies, timeout=10)
        # use the built-in JSON decoder instead of json.loads(response.text)
        data_all = response.json()
        # guard each level: a missing 'data'/'baseInfo' previously raised
        # AttributeError on NoneType; treat any gap as "no detail"
        xq = ((data_all.get('data') or {}).get('baseInfo') or {}).get('scope')
        if xq is None:
            xq = '-'
        return xq

    def get_owing_taxes_notice(self, html, gszch):
        '''欠税公告 (owed-tax notices) — one 7-item row per table row.'''
        records = []
        for row in html.xpath("//*[@id='_container_towntax']/table/tbody/tr"):
            publish_date = row.xpath("./td[2]/text()")[0]    # 发布日期
            taxpayer_id = row.xpath("./td[3]/text()")[0]     # 纳税人识别号
            tax_category = row.xpath("./td[4]/text()")[0]    # 欠税税种
            current_amount = row.xpath("./td[5]/text()")[0]  # 当前发生的欠税额
            balance = row.xpath("./td[6]/text()")[0]         # 欠税余额
            tax_organ = row.xpath("./td[7]/text()")[0]       # 税务机关
            records.append([gszch, publish_date, taxpayer_id, tax_category,
                            current_amount, balance, tax_organ])
        return records

    def get_judicial_sale(self, html, gszch, SYSID, url):
        '''司法拍卖 (judicial sales) with pagination.

        Fix: `'[\\.]'` was an invalid escape in a non-raw string
        (SyntaxWarning on Python 3.12+); now a raw string.
        '''
        judicial_sale_list = []
        trs = html.xpath("//*[@id='_container_judicialSale']/table/tbody/tr")
        judicial_sale_list = self.parse_judicial_sale(gszch, trs, judicial_sale_list)
        if html.xpath("//div[@id='_container_judicialSale']//ul[@class='pagination']"):
            pn_all = html.xpath("//div[@id='_container_judicialSale']//ul[@class='pagination']/li[last()-1]//text()")[0]
            # pagination label may contain leading dots ("...5"); keep digits only
            pn_all = re.sub(r'[\.]', '', str(pn_all))
            judicial_sale_list = self.get_more_judicial_sale(pn_all, SYSID, url, gszch, judicial_sale_list)
        return judicial_sale_list

    def get_more_judicial_sale(self, pn_all, SYSID, url, gszch, judicial_sale_list):
        '''获取更多司法拍卖 — fetch judicial-sale pages 2..pn_all and parse them.'''
        headers = {'Accept': '*/*',
                   'Connection': 'keep-alive',
                   'Host': 'www.tianyancha.com',
                   'Referer': f'{url}',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
        for page in range(2, int(pn_all) + 1):
            page_url = f'https://www.tianyancha.com/pagination/judicialSale.xhtml?ps=10&pn={page}&id={SYSID}&_={int(round(time.time() * 1000))}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            rows = page_html.xpath("//table[@class='table']/tbody/tr")
            judicial_sale_list = self.parse_judicial_sale(gszch, rows, judicial_sale_list)
        return judicial_sale_list


    def parse_judicial_sale(self, gszch, trs, judicial_sale_list):
        '''解析司法拍卖 — parse sale rows.  The auction-target cell holds a
        list of fragments; brackets, quotes, commas and whitespace from its
        str() form are stripped.'''
        for row in trs:
            notice = row.xpath("./td[3]/text()")[0]        # 拍卖公告
            notice_date = row.xpath("./td[2]/text()")[0]   # 公告日期
            court = row.xpath("./td[5]/text()")[0]         # 执行法院
            target = row.xpath("./td[4]//text()")          # 拍卖标的
            target = re.sub(r'[\[\]\',\s+]', '', str(target))
            judicial_sale_list.append([gszch, notice, notice_date, court, target])
        return judicial_sale_list

    def get_core_team(self, html, gszch, company_name, url):
        '''核心团队 (core team) with pagination via the shared get_more helper.

        Fix: `'[\\.]'` was an invalid escape in a non-raw string
        (SyntaxWarning on Python 3.12+); now a raw string.
        '''
        core_team_list = []
        trs = html.xpath("//*[@id='_container_teamMember']/div/table/tbody/tr")
        core_team_list = self.parse_core_team(gszch, trs, core_team_list)
        company_name = parse.quote(company_name)
        if html.xpath("//div[@id='_container_teamMember']//ul[@class='pagination']"):
            pn_all = html.xpath("//div[@id='_container_teamMember']//ul[@class='pagination']/li[last()-1]//text()")[0]
            # pagination label may contain leading dots ("...5"); keep digits only
            pn_all = re.sub(r'[\.]', '', str(pn_all))
            for pn in range(2, int(pn_all) + 1):
                core_team_list = self.get_more(url, gszch, core_team_list, 'teamMember', 10, pn, 'name', company_name, 'table', self.parse_core_team)
        return core_team_list

    def parse_core_team(self, gszch, trs, core_team_list):
        '''解析核心团队 — parse team-member rows; names may appear either
        as a link or as a plain span, and the bio has whitespace stripped.'''
        for row in trs:
            try:
                name = row.xpath("./td[2]//a/text()")[0]    # 姓名 (linked)
            except IndexError:
                name = row.xpath("./td[2]//span/text()")[1]  # 姓名 (plain span)
            title = row.xpath("./td[3]/text()")[0]           # 职务
            bio = row.xpath("./td[4]/div/div/text()")[0]     # 简介
            bio = re.sub(r'[\s+]', '', str(bio))
            core_team_list.append([gszch, name, title, bio])
        return core_team_list

    def get_business_events(self, html, gszch):
        '''企业业务 (company products/business lines) — one 4-item row each.'''
        records = []
        for row in html.xpath("//*[@id='_container_firmProduct']/table/tbody/tr"):
            name = row.xpath("./td[2]//a/text()")[0]       # 名称
            domain = row.xpath("./td[5]/a/text()")[0]      # 领域
            summary = row.xpath("./td[7]/div/div/text()")[0]  # 业务介绍
            records.append([gszch, name, domain, summary])
        return records

    def get_investment_event(self, html, gszch, company_name, url):
        '''投资事件 (investment events) with pagination.

        Fix: `'[\\.]'` was an invalid escape in a non-raw string
        (SyntaxWarning on Python 3.12+); now a raw string.
        '''
        investment_event_list = []
        trs = html.xpath("//*[@id='_container_touzi']/table/tbody/tr")
        investment_event_list = self.parse_investment_event(gszch, trs, investment_event_list)
        if html.xpath("//div[@id='_container_touzi']//ul[@class='pagination']"):
            pn_all = html.xpath("//div[@id='_container_touzi']//ul[@class='pagination']/li[last()-1]//text()")[0]
            # pagination label may contain leading dots ("...5"); keep digits only
            pn_all = re.sub(r'[\.]', '', str(pn_all))
            investment_event_list = self.get_more_investment_event(pn_all, company_name, url, gszch, investment_event_list)
        return investment_event_list

    def get_more_investment_event(self, pn_all, company_name, url, gszch, investment_event_list):
        '''获取更多投资事件 — fetch investment-event pages 2..pn_all and parse them.'''
        quoted_name = parse.quote(company_name)
        headers = {'Accept': '*/*',
                   'Connection': 'keep-alive',
                   'Host': 'www.tianyancha.com',
                   'Referer': f'{url}',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
        for page in range(2, int(pn_all) + 1):
            page_url = f'https://www.tianyancha.com/pagination/touzi.xhtml?ps=10&pn={page}&name={quoted_name}&_={int(round(time.time() * 1000))}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            rows = page_html.xpath("//table[@class='table']/tbody/tr")
            investment_event_list = self.parse_investment_event(gszch, rows, investment_event_list)
        return investment_event_list

    def request_max(self, url, headers):
        '''GET *url* with the stored cookies on a throwaway session.

        Fixes: the session was never closed (socket/file-descriptor leak
        across the many pagination calls) and the request had no timeout,
        so a stalled server could hang the spider forever.
        May raise requests exceptions (ConnectionError, Timeout, ...).
        '''
        requests.adapters.DEFAULT_RETRIES = 15  # raise connect-retry count for newly created adapters
        with requests.session() as s:  # ensure the session (and its sockets) is released
            s.keep_alive = False  # do not keep idle connections around
            return s.get(url, headers=headers, cookies=self.cookies, timeout=30)


    def parse_investment_event(self, gszch, trs, investment_event_list):
        '''解析投资事件 — parse investment rows; the investors cell is a
        list of fragments whose str() form has brackets/quotes/commas and
        whitespace stripped.'''
        for row in trs:
            event_date = row.xpath("./td[2]/text()")[0]  # 时间
            round_name = row.xpath("./td[3]/text()")[0]  # 轮次
            amount = row.xpath("./td[5]/text()")[0]      # 金额
            investors = row.xpath("./td[4]//text()")     # 投资方
            investors = re.sub(r'[\[\],\'\s+]', '', str(investors))
            product = row.xpath("./td[6]//a/text()")[0]  # 产品
            region = row.xpath("./td[8]/text()")[0]      # 地区
            industry = row.xpath("./td[7]/a/text()")[0]  # 行业
            business = row.xpath("./td[9]/text()")[0]    # 业务
            investment_event_list.append([gszch, event_date, round_name, amount,
                                          investors, product, region, industry, business])
        return investment_event_list

    def get_competing_goods_information(self, html, gszch, company_name, url):
        '''竞品信息 (competitor products) with pagination.

        Fix: `'[\\.]'` was an invalid escape in a non-raw string
        (SyntaxWarning on Python 3.12+); now a raw string.
        '''
        competing_goods_information_list = []
        trs = html.xpath("//*[@id='_container_jingpin']/div/table/tbody/tr")
        competing_goods_information_list = self.parse_competing_goods_information(gszch, trs, competing_goods_information_list)
        if html.xpath("//div[@id='_container_jingpin']//ul[@class='pagination']"):
            pn_all = html.xpath("//div[@id='_container_jingpin']//ul[@class='pagination']/li[last()-1]//text()")[0]
            # pagination label may contain leading dots ("...5"); keep digits only
            pn_all = re.sub(r'[\.]', '', str(pn_all))
            competing_goods_information_list = self.get_more_goods_information(pn_all, company_name, url, gszch, competing_goods_information_list)
        return competing_goods_information_list

    def get_more_goods_information(self, pn_all, company_name, url, gszch, competing_goods_information_list):
        '''获取更多竞品信息 — fetch competitor pages 2..pn_all and parse them.'''
        quoted_name = parse.quote(company_name)
        headers = {'Accept': '*/*',
                   'Connection': 'keep-alive',
                   'Host': 'www.tianyancha.com',
                   'Referer': f'{url}',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
        for page in range(2, int(pn_all) + 1):
            page_url = f'https://www.tianyancha.com/pagination/jingpin.xhtml?ps=10&pn={page}&name={quoted_name}&_={int(round(time.time() * 1000))}'
            response = self.request_max(page_url, headers)
            page_html = etree.HTML(response.text)
            rows = page_html.xpath("//table[@class='table']/tbody/tr")
            competing_goods_information_list = self.parse_competing_goods_information(gszch, rows, competing_goods_information_list)
        return competing_goods_information_list


    def parse_competing_goods_information(self, gszch, trs, competing_goods_information_list):
        '''解析竞品信息 — parse competitor rows; the product name may be a
        link or a plain span nested in an inner table.'''
        for row in trs:
            try:
                product = row.xpath("./td[2]//a/text()")[0]        # 产品 (linked)
            except IndexError:
                product = row.xpath("./td[2]//tr//span/text()")[0]  # 产品 (plain)
            region = row.xpath("./td[7]/text()")[0]     # 地区
            cur_round = row.xpath("./td[3]/text()")[0]  # 当前轮次
            industry = row.xpath("./td[6]//text()")[0]  # 行业
            business = row.xpath("./td[8]/text()")[0]   # 业务
            founded = row.xpath("./td[5]/text()")[0]    # 成立时间
            competing_goods_information_list.append(
                [gszch, product, region, cur_round, industry, business, founded])
        return competing_goods_information_list


    def get_tax_credit_rating(self, html, gszch):
        '''税务评级 (tax-credit ratings) — one 6-item row per table row.'''
        records = []
        for row in html.xpath("//*[@id='_container_taxcredit']/table/tbody/tr"):
            year = row.xpath("./td[2]/text()")[0]         # 年份
            rating = row.xpath("./td[3]//text()")[0]      # 纳税评级
            kind = row.xpath("./td[4]/text()")[0]         # 类型
            taxpayer_id = row.xpath("./td[5]/text()")[0]  # 纳税人识别号
            rater = row.xpath("./td[6]/text()")[0]        # 评价单位
            records.append([gszch, year, rating, kind, taxpayer_id, rater])
        return records

    def get_check(self, html, gszch):
        '''抽查检查 (spot checks) — one 5-item row per table row.'''
        records = []
        for row in html.xpath("//*[@id='_container_check']/div/table/tbody/tr"):
            check_date = row.xpath("./td[2]/text()")[0]  # 日期
            kind = row.xpath("./td[3]/text()")[0]        # 类型
            result = row.xpath("./td[4]/text()")[0]      # 结果
            organ = row.xpath("./td[5]/text()")[0]       # 检查实施机关
            records.append([gszch, check_date, kind, result, organ])
        return records

    def get_qualification_certificate(self, html, gszch, SYSID, url):
        '''资质证书 (qualification certificates) with pagination.

        Fixes: `'[\\.]'` was an invalid escape in a non-raw string
        (SyntaxWarning on Python 3.12+), and a leftover debug print of the
        page count was removed.
        '''
        qualification_certificate_list = []
        trs = html.xpath("//*[@id='_container_certificate']/div/table/tbody/tr")
        qualification_certificate_list = self.parse_qualification_certificate(gszch, trs, qualification_certificate_list)
        if html.xpath("//div[@id='_container_certificate']//ul[@class='pagination']"):
            pn_all = html.xpath("//div[@id='_container_certificate']//ul[@class='pagination']/li[last()-1]//text()")[0]
            # pagination label may contain leading dots ("...5"); keep digits only
            pn_all = re.sub(r'[\.]', '', str(pn_all))
            for pn in range(2, int(pn_all) + 1):
                qualification_certificate_list = self.get_more(url, gszch, qualification_certificate_list, 'certificate', 10, pn, 'id', SYSID, 'table -sort', self.parse_qualification_certificate)
        return qualification_certificate_list

    def parse_qualification_certificate(self, gszch, trs, qualification_certificate_list):
        '''Parse certificate rows into [gszch, 证书类型, 证书编码, 发证日期, 截止日期].'''
        for row in trs:
            cert_type = row.xpath("./td[3]//text()")[0]   # 证书类型
            cert_code = row.xpath("./td[4]//text()")[0]   # 证书编码
            issue_date = row.xpath("./td[2]//text()")[0]  # 发证日期
            expire_date = row.xpath("./td[5]//text()")[0] # 截止日期
            qualification_certificate_list.append(
                [gszch, cert_type, cert_code, issue_date, expire_date])
        return qualification_certificate_list

    def get_bid(self, html, gszch, url, SYSID):
        '''招投标 (bids/tenders) with pagination.

        Fix: `'[\\.]'` was an invalid escape in a non-raw string
        (SyntaxWarning on Python 3.12+); now a raw string.
        '''
        bid_list = []
        trs = html.xpath("//*[@id='_container_bid']/table/tbody/tr")
        bid_list = self.parse_bid(gszch, trs, bid_list)
        if html.xpath("//div[@id='_container_bid']//ul[@class='pagination']"):
            pn_all = html.xpath("//div[@id='_container_bid']//ul[@class='pagination']/li[last()-1]//text()")[0]
            # pagination label may contain leading dots ("...5"); keep digits only
            pn_all = re.sub(r'[\.]', '', str(pn_all))
            for pn in range(2, int(pn_all) + 1):
                bid_list = self.get_more(url, gszch, bid_list, 'bid', 10, pn, 'id', SYSID, 'table', self.parse_bid)
        return bid_list

    def parse_bid(self, gszch, trs, bid_list):
        '''Parse bid rows into [gszch, 发布时间, 标题, 采购人].'''
        for row in trs:
            publish_time = row.xpath("./td[2]/text()")[0]  # 发布时间
            title = row.xpath("./td[3]//text()")[0]        # 标题
            purchaser = row.xpath("./td[4]/text()")[0]     # 采购人
            bid_list.append([gszch, publish_time, title, purchaser])
        return bid_list

    def get_recruit(self, html, gszch, url, SYSID):
        '''招聘 (job postings) with pagination.  Detail pages would need a
        Baidu login cookie, so details are not fetched here.

        Fix: `'[\\.]'` was an invalid escape in a non-raw string
        (SyntaxWarning on Python 3.12+); now a raw string.
        '''
        recruit_list = []
        trs = html.xpath("//*[@id='_container_baipin']/table/tbody/tr")
        recruit_list = self.parse_recruit(gszch, trs, recruit_list)
        if html.xpath("//div[@id='_container_baipin']//ul[@class='pagination']"):
            pn_all = html.xpath("//div[@id='_container_baipin']//ul[@class='pagination']/li[last()-1]//text()")[0]
            # pagination label may contain leading dots ("...5"); keep digits only
            pn_all = re.sub(r'[\.]', '', str(pn_all))
            for pn in range(2, int(pn_all) + 1):
                recruit_list = self.get_more(url, gszch, recruit_list, 'baipin', 10, pn, 'id', SYSID, 'table', self.parse_recruit)
        return recruit_list

    def get_detail_recruit(self, xq_href,url):
        '''Fetch a recruit-detail page from zhaopin.baidu.com (needs Baidu cookies).

        Follows one redirect manually, then prints the headcount and job
        description scraped from the landing page.  Returns None; currently
        only used for debugging (its caller in parse_recruit is commented out).
        '''
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
            'Referer': url,
            'Host': 'zhaopin.baidu.com',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'}
        time.sleep(2)  # throttle to avoid triggering anti-bot checks
        # redirects disabled so the Location header can be read manually
        response = requests.get(xq_href, headers=headers, cookies=self.cookies, proxies=self.proxy,allow_redirects=False)
        print(response.status_code)
        # NOTE(review): raises KeyError if the server does not redirect — confirm
        new_url = response.headers["Location"]
        print(new_url)
        response=requests.get(new_url,headers=headers, cookies=self.cookies, proxies=self.proxy)
        print(response.status_code)
        print(response.text)
        html=etree.HTML(response.text)
        # positional xpaths below are brittle against Baidu layout changes
        zprs=html.xpath("//*[@id='main']/div[1]/div[3]/span/text()")[0]  # 招聘人数 (headcount)
        print(zprs)
        xq=html.xpath("//*[@id='main']/div[3]/div[1]/div[1]/div[4]/div/p/text()")[0]  # 详情 (description)
        print(xq)

    def parse_recruit(self, gszch, trs, recruit_list):
        '''Parse recruit rows.  Headcount and description need a Baidu-cookie
        detail fetch (see get_detail_recruit, currently disabled), so they
        are emitted as '-' and '' placeholders.'''
        for row in trs:
            publish_time = row.xpath("./td[2]/text()")[0]  # 发布时间
            position = row.xpath("./td[3]/text()")[0]      # 招聘职位
            salary = row.xpath("./td[4]/text()")[0]        # 薪资
            experience = row.xpath("./td[6]/text()")[0]    # 工作经验
            city = row.xpath("./td[7]/text()")[0]          # 所在城市
            xq_href = row.xpath("./td[8]/a/@href")[0]      # 详情地址 (unused while detail fetch is disabled)
            # self.get_detail_recruit(xq_href,url)
            headcount = '-'
            detail = ''
            recruit_list.append([gszch, publish_time, position, salary,
                                 experience, headcount, city, detail])
        return recruit_list

    def get_wechat(self, html, gszch, SYSID, url):
        '''微信公众号 — collect WeChat official-account records, paging as needed.'''
        results = self.parse_wechat(
            gszch, html.xpath("//*[@id='_container_wechat']/table/tbody/tr"), [])
        pager = html.xpath("//div[@id='_container_wechat']//ul[@class='pagination']")
        if pager:
            # second-to-last pagination item holds the last page number
            last = html.xpath("//div[@id='_container_wechat']//ul[@class='pagination']/li[last()-1]//text()")[0]
            last = re.sub('[\.]', '', str(last))  # strip '...' ellipsis characters
            for pn in range(2, int(last) + 1):
                results = self.get_more(url, gszch, results, 'wechat', 10, pn,
                                        'id', SYSID, 'table', self.parse_wechat)
        return results

    def parse_wechat(self, gszch, trs, wechat_list):
        '''Append one WeChat-account record [regno, account, id, description] per row.'''
        for row in trs:
            gzh = row.xpath("./td[2]//span/text()")[0]     # 公众号 account name
            wxh = row.xpath("./td[3]//text()")[0]          # 微信号 wechat id
            gnjs = row.xpath("./td[5]/div/div/text()")[0]  # 功能介绍 description
            wechat_list.append([gszch, gzh, wxh, gnjs])
        return wechat_list

    def get_importAndExport(self, html, gszch, text):
        '''进出口信用 — import/export credit records.

        The visible table carries registry customs and category; the credit
        rating and customs code live in inline JSON next to each row, so they
        are pulled out of the raw page text by 1-based row index.

        Fixes over the old version: guards against the regex not matching
        (previously ``x[0]`` raised IndexError and ``hgbm``/``xydj`` could be
        appended half-initialized) and uses if/else instead of two
        complementary ifs that re-ran findall.
        '''
        importAndExport_list = []
        trs = html.xpath("//*[@id='_container_importAndExport']/table/tbody/tr")
        for cnt, tr in enumerate(trs, start=1):
            zchg = tr.xpath("./td[2]/text()")[0]  # 注册海关 registering customs
            jylb = tr.xpath("./td[4]/text()")[0]  # 经营类别 business category
            hgbm = '-'  # 海关编码 customs code (default when not found)
            xydj = '-'  # 信用等级 credit rating (default when not found)
            x = re.findall(
                '<td class="">%d</td><td>.*?creditRating":(.*?),.*?crCode":"(.*?)".*?class="link-click"' % (cnt), text)
            if x:
                if x[0][0] == '[]':
                    # no rating present: the loosely-matched code is already usable
                    hgbm = x[0][1]
                else:
                    # rating present: re-match with the stricter pattern that
                    # strips the surrounding JSON-list syntax
                    y = re.findall(
                        '<td class="">%d</td><td>.*?:\[{"creditRating":"(.*?)",.*?"crCode":"(.*?)",.*?</script><span class="link-click"' % (
                            cnt), text)
                    if y:
                        xydj, hgbm = y[0]
            importAndExport_list.append([gszch, zchg, hgbm, jylb, xydj])
        return importAndExport_list

    def get_bond(self, html, gszch):
        '''债券信息 — one [regno, issue date, name, code, type, rating] per bond row.'''
        bond_list = []
        for row in html.xpath("//*[@id='_container_bond']/table/tbody/tr"):
            fxrq = row.xpath("./td[2]/text()")[0]  # 发行日期 issue date
            zqmc = row.xpath("./td[3]/text()")[0]  # 债券名称 bond name
            zqdm = row.xpath("./td[4]/text()")[0]  # 债券代码 bond code
            zqlx = row.xpath("./td[5]/text()")[0]  # 债券类型 bond type
            zxpj = row.xpath("./td[6]/text()")[0]  # 最新评级 latest rating
            bond_list.append([gszch, fxrq, zqmc, zqdm, zqlx, zxpj])
        return bond_list

    def get_purchaselandV2(self, html, gszch, text):
        '''购地信息 — land-purchase records.

        Supervision number and agreed start date are embedded in inline JSON in
        the raw page text and matched per 1-based row index; '-' when absent.
        '''
        purchaselandV2_list = []
        for cnt, row in enumerate(html.xpath("//*[@id='_container_purchaselandV2']/table/tbody/tr"), start=1):
            qdrq = row.xpath("./td[7]/text()")[0]  # 签订日期 signing date
            x = re.findall('''<tr><td>%d</td><td class="left-col">.*?"electronicRegulatoryNumber":"(.*?)",.*?"agreementStartTime":"(.*?)"}</script><span class="link-click"''' % (cnt), text)
            try:
                dzjgh = x[0][0]  # 电子监管号 electronic supervision number
            except IndexError:
                dzjgh = '-'
            try:
                yddgr = x[0][1]  # 约定动工日 agreed construction start
            except IndexError:
                yddgr = '-'
            gdzmj = row.xpath("./td[4]/text()")[0]  # 供地总面积 total supplied area
            xzq = row.xpath("./td[5]/text()")[0]    # 行政区 administrative district
            xq = row.xpath("./td[2]/text()")[0]     # 详情 detail
            purchaselandV2_list.append([gszch, qdrq, dzjgh, yddgr, gdzmj, xzq, xq])
        return purchaselandV2_list

    def get_patent(self, html, gszch, SYSID, url):
        '''专利 — patent records.

        Parses the first visible page, then walks the remaining pages through
        the pagination endpoint.  The page total is derived from the patent
        counter badge because the pagination widget truncates long lists.

        Fixes over the old version:
        * the first page was parsed with ``self.parse_tmInfo`` (trademark
          column layout), yielding wrong fields for page 1 — now uses
          ``self.parse_patent``;
        * the page count used ``count // 10 + 2`` plus a last-digit heuristic,
          which fetched a spurious extra page (e.g. count=100) or missed the
          final page (e.g. count=88) — now exact ceiling division.
        '''
        patent_list = []
        trs = html.xpath("//*[@id='_container_patent']/table/tbody/tr")
        patent_list = self.parse_patent(gszch, trs, patent_list)
        if html.xpath("//div[@id='_container_patent']//ul[@class='pagination']"):
            count = html.xpath("//div[@id='nav-main-patentCount']/span[@class='data-count']/text()")[0]
            pages = (int(count) + 9) // 10  # ceil(count / 10), 10 rows per page
            for pn in range(2, pages + 1):
                if (pn - 2) % 100 == 0:
                    time.sleep(2)  # throttle every 100 pages (was `pn in range(2, pn_all, 100)`)
                patent_list = self.get_more(url, gszch, patent_list, 'patent', 10, pn, 'id', SYSID,
                                            'table', self.parse_patent)
        return patent_list

    def parse_patent(self, gszch, trs, patent_list):
        '''Append one patent record [regno, publish date, name, app no, pub no, type] per row.'''
        for row in trs:
            sqgbr = row.xpath("./td[2]//text()")[0]  # 申请公布日 application publish date
            zlmc = row.xpath("./td[3]//text()")[0]   # 专利名称 patent name
            sqh = row.xpath("./td[4]//text()")[0]    # 申请号 application number
            sqgbh = row.xpath("./td[5]//text()")[0]  # 申请公布号 publication number
            lx = row.xpath("./td[6]//text()")[0]     # 类型 type
            patent_list.append([gszch, sqgbr, zlmc, sqh, sqgbh, lx])
        return patent_list

    def get_tmInfo(self, html, gszch, SYSID, url):
        '''商标 — trademark records.

        Parses the first visible page, then walks the remaining pages through
        the pagination endpoint.  The page total is computed from the
        data-count badge because the pagination widget truncates long lists.

        Fix: the old page count used ``count // 10 + 2`` plus a last-digit
        heuristic, which fetched a spurious extra page (e.g. count=100) or
        missed the final page (e.g. count=88) — replaced with exact ceiling
        division.
        '''
        tmInfo_list = []
        trs = html.xpath("//*[@id='_container_tmInfo']/div[last()]/table/tbody/tr")
        tmInfo_list = self.parse_tmInfo(gszch, trs, tmInfo_list)
        if html.xpath("//div[@id='_container_tmInfo']//ul[@class='pagination']"):
            count = html.xpath("//div[@id='_container_tmInfo']//span[@class='data-count']/text()")[0]
            pages = (int(count) + 9) // 10  # ceil(count / 10), 10 rows per page
            for pn in range(2, pages + 1):
                tmInfo_list = self.get_more(url, gszch, tmInfo_list, 'tmInfo', 10, pn, 'id', SYSID,
                                            'table -logo-center -sort', self.parse_tmInfo)
        return tmInfo_list

    def parse_tmInfo(self, gszch, trs, tmInfo_list):
        '''Append one trademark record [regno, apply date, name, reg no, class, status] per row.'''
        for row in trs:
            sqrq = row.xpath("./td[2]//text()")[0]  # 申请日期 application date
            sbmc = row.xpath("./td[4]//text()")[0]  # 商标名称 trademark name
            zch = row.xpath("./td[5]//text()")[0]   # 注册号 registration number
            lb = row.xpath("./td[6]//text()")[0]    # 类别 class
            zt = row.xpath("./td[7]//text()")[0]    # 状态 status
            tmInfo_list.append([gszch, sqrq, sbmc, zch, lb, zt])
        return tmInfo_list

    def get_dishonest(self, html, gszch, text, url):
        '''失信人 — current dishonest-debtor records for the company.'''
        rows = html.xpath("//*[@id='_container_dishonest']/table/tbody/tr")
        return self.parse_dishonest(gszch, text, url, rows, [])

    def get_past_dishonest(self, html, gszch, text, url):
        '''历史失信人 — historical dishonest-debtor records.

        Same parsing path as get_dishonest, different container id.  The old
        version printed the whole result list to stdout on every call (leftover
        debugging); that noise is removed.
        '''
        trs = html.xpath("//*[@id='_container_pastDishonest']/table/tbody/tr")
        return self.parse_dishonest(gszch, text, url, trs, [])

    def parse_dishonest(self, gszch, text, url, trs, dishonest_list):
        '''解析失信人 — parse dishonest-debtor rows.

        The detail id is scraped from the raw page text (spaces stripped first
        so the pattern matches the collapsed markup) and resolved through the
        detail API for duty text, organisation code and province.
        '''
        stripped = text.replace(' ', '')
        for cnt, row in enumerate(trs, start=1):
            fbrq = row.xpath("./td[2]//text()")[0]   # 发布日期 publish date
            larq = row.xpath("./td[3]//text()")[0]   # 立案日期 filing date
            ah = row.xpath("./td[4]/text()")[0]      # 案号 case number
            zxfy = row.xpath("./td[5]/text()")[0]    # 执行法院 executing court
            lxzt = row.xpath("./td[6]/text()")[0]    # 履行状态 performance status
            zxyjwa = row.xpath("./td[7]/text()")[0]  # 执行依据文案 basis document
            match = re.findall(
                '''<tr><td>%d</td><td><span>.*?<spanclass="link-click"onclick='openDishonestinfoDetail\("(.*?)"\)'>详情</span>''' % (
                    cnt), stripped)
            did = match[0]  # detail id for the API lookup
            xq, cardno, courtarea = self.get_dishonest_details(url, did)
            dishonest_list.append([
                gszch,      # 工商注册号 registration number
                fbrq, larq, ah, zxfy, lxzt, zxyjwa,
                cardno,     # 组织机构代码 organisation code
                courtarea,  # 省份 province
                xq,         # 详情 detail text
            ])
        return dishonest_list

    def get_dishonest_details(self, url, did):
        '''失信人详情 — fetch one dishonest-record detail from the capi endpoint.

        Returns ``(xq, cardno, courtarea)``: duty text, organisation code and
        province, each replaced by '-' when the API omits it.

        Fix: the old code ran ``re.sub`` on ``xq`` *before* the ``is None``
        check, so a missing 'duty' field raised TypeError and the None-guard
        was dead code.
        '''
        _url = f'https://capi.tianyancha.com/cloud-newdim/company/getDishonestinfoDetail?did={did}&_={int(round(time.time() * 1000))}'
        headers = {'Origin': 'https://www.tianyancha.com',
                   'Referer': url,
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
                   'version': 'TYC-Web',
                   'X-AUTH-TOKEN': 'eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxODk4MTkzNjA0NyIsImlhdCI6MTU3Mjk0MjE4NSwiZXhwIjoxNjA0NDc4MTg1fQ.7b9aYM5Hnp2FYLMOktGkL5gzLgAwpa-vgg3I4mw6FohfS81kvS8q5lcWFZwthi-ZSO_A1kjFpixul70ZOVO7nQ'
                   }
        response = requests.get(_url, headers=headers, cookies=self.cookies, timeout=10)
        data = json.loads(response.text)['data']
        cardno = data['cardnum']      # 组织机构代码 organisation code
        courtarea = data['areaname']  # 省份 province
        xq = data.get('duty')         # 详情 duty text, may be missing
        if xq is None:
            xq = '-'
        else:
            xq = re.sub(r'[\s+]', '', xq)  # collapse whitespace (and literal '+')
        if cardno is None:
            cardno = '-'
        if courtarea is None:
            courtarea = '-'
        return xq, cardno, courtarea

    def get_product(self, html, gszch):
        '''产品信息 — one [regno, name, short name, category, domain] per product row.'''
        product_list = []
        for row in html.xpath("//*[@id='_container_product']/table/tbody/tr"):
            cpmc = row.xpath("./td[2]//span/text()")[0]  # 产品名称 product name
            cpjc = row.xpath("./td[3]//text()")[0]       # 产品简称 short name
            cpfl = row.xpath("./td[4]//text()")[0]       # 产品分类 category
            ly = row.xpath("./td[5]//text()")[0]         # 领域 domain
            product_list.append([gszch, cpmc, cpjc, cpfl, ly])
        return product_list

    def get_rongzi(self, html, gszch, company_name, url):
        '''融资历史 — financing history, paging via the rongzi endpoint when needed.'''
        rongzi_list = self.parse_rongzi(
            gszch, html.xpath("//*[@id='_container_rongzi']/table/tbody/tr"), [])
        if html.xpath("//div[@id='_container_rongzi']//ul[@class='pagination']"):
            # second-to-last pagination item holds the last page number
            last = html.xpath("//div[@id='_container_rongzi']//ul[@class='pagination']/li[last()-1]//text()")[0]
            last = re.sub('[\.]', '', str(last))  # strip '...' ellipsis characters
            rongzi_list = self.get_more_rongzi(last, company_name, url, gszch, rongzi_list)
        return rongzi_list

    def get_more_rongzi(self, pn_all, company_name, url, gszch, rongzi_list):
        '''获取更多融资历史 — fetch financing-history pages 2..pn_all by company name.'''
        quoted_name = parse.quote(company_name)  # URL-encode the company name once
        for pn in range(2, int(pn_all) + 1):
            url_ = f'https://www.tianyancha.com/pagination/rongzi.xhtml?ps=10&pn={pn}&name={quoted_name}&_={int(round(time.time() * 1000))}'
            headers = {'Accept': '*/*',
                       'Connection': 'keep-alive',
                       'Host': 'www.tianyancha.com',
                       'Referer': f'{url}',
                       'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
            # request_max wraps requests.get with retry handling
            response = self.request_max(url_, headers)
            page = etree.HTML(response.text)
            rows = page.xpath("//table[@class='table -rongzi -sort']/tbody/tr")
            rongzi_list = self.parse_rongzi(gszch, rows, rongzi_list)
        return rongzi_list

    def parse_rongzi(self, gszch, trs, rongzi_list):
        '''解析融资历史 — one [regno, date, round, valuation, amount, ratio, investors] per row.'''
        for row in trs:
            sj = row.xpath("./td[2]/text()")[0]  # 时间 date
            lc = row.xpath("./td[4]//text()")    # 轮次 round (list of text fragments)
            gz = row.xpath("./td[5]/text()")[0]  # 估值 valuation
            je = row.xpath("./td[3]/text()")[0]  # 金额 amount
            bl = row.xpath("./td[6]/text()")[0]  # 比例 ratio
            tzf = row.xpath("./td[7]//text()")   # 投资方 investors (list of text fragments)
            # flatten the fragment lists by stringifying and stripping list punctuation
            lc = re.sub(r'[\s+\[\]\',]', '', str(lc))
            tzf = re.sub(r'[\[\]\',]', '', str(tzf))
            rongzi_list.append([gszch, sj, lc, gz, je, bl, tzf])
        return rongzi_list

    def get_copyright(self, html, gszch, SYSID, url):
        '''软件著作权 — software copyright records, paging as needed.'''
        results = self.parse_copyright(
            gszch, html.xpath("//*[@id='_container_copyright']/table/tbody/tr"), [])
        if html.xpath("//div[@id='_container_copyright']//ul[@class='pagination']"):
            # second-to-last pagination item holds the last page number
            last = html.xpath("//div[@id='_container_copyright']//ul[@class='pagination']/li[last()-1]//text()")[0]
            last = re.sub('[\.]', '', str(last))  # strip '...' ellipsis characters
            for pn in range(2, int(last) + 1):
                results = self.get_more(url, gszch, results, 'copyright', 10, pn,
                                        'id', SYSID, 'table -breakall', self.parse_copyright)
        return results

    def parse_copyright(self, gszch, trs, copyright_list):
        '''Append one software-copyright record per row:
        [regno, approval date, full name, short name, reg no, class no, version].'''
        for row in trs:
            pzrq = row.xpath("./td[2]//text()")[0]  # 批准日期 approval date
            rjqc = row.xpath("./td[3]//text()")[0]  # 软件全称 full software name
            rjjc = row.xpath("./td[4]//text()")[0]  # 软件简称 short name
            djh = row.xpath("./td[5]//text()")[0]   # 登记号 registration number
            flh = row.xpath("./td[6]//text()")[0]   # 分类号 classification number
            bbh = row.xpath("./td[7]//text()")[0]   # 版本号 version number
            copyright_list.append([gszch, pzrq, rjqc, rjjc, djh, flh, bbh])
        return copyright_list

    def get_icp(self, html, gszch, SYSID, url):
        '''网站备案 — ICP website filings, including the historical-filings table.'''
        icp_list = self.parse_icp(
            gszch, html.xpath("//*[@id='_container_icp']/table/tbody/tr"), [])
        if html.xpath("//div[@id='_container_icp']//ul[@class='pagination']"):
            # second-to-last pagination item holds the last page number
            last = html.xpath("//div[@id='_container_icp']//ul[@class='pagination']/li[last()-1]//text()")[0]
            last = re.sub('[\.]', '', str(last))  # strip '...' ellipsis characters
            for pn in range(2, int(last) + 1):
                icp_list = self.get_more(url, gszch, icp_list, 'icp', 10, pn,
                                         'id', SYSID, 'table -sort', self.parse_icp)
        if html.xpath("//*[@id='_container_pastIcpList']"):
            past_rows = html.xpath("//*[@id='_container_pastIcpList']/table/tbody/tr")
            icp_list = self.parse_icp(gszch, past_rows, icp_list)
        return icp_list

    def parse_icp(self, gszch, trs, icp_list):
        '''Append one ICP-filing record per row:
        [regno, review date, site name, homepage, domain, filing no, status, org type].'''
        for row in trs:
            shsj = row.xpath("./td[2]//text()")[0]    # 审核时间 review date
            wzmc = row.xpath("./td[3]//text()")[0]    # 网站名称 site name
            wzsy = row.xpath("./td[4]//a/text()")[0]  # 网站首页 homepage
            ym = row.xpath("./td[5]/text()")[0]       # 域名 domain
            bah = row.xpath("./td[6]//text()")[0]     # 备案号 filing number
            zt = '正常'    # 状态 status: fixed value, not present in the table
            dwxz = '企业'  # 单位性质 org type: fixed value, not present in the table
            icp_list.append([gszch, shsj, wzmc, wzsy, ym, bah, zt, dwxz])
        return icp_list

    def get_copyrightWorks(self, html, gszch, SYSID, url):
        '''作品著作权 — works copyright records, paging as needed.'''
        results = self.parse_copyrightWorks(
            gszch, html.xpath("//*[@id='_container_copyrightWorks']/table/tbody/tr"), [])
        if html.xpath("//div[@id='_container_copyrightWorks']//ul[@class='pagination']"):
            # second-to-last pagination item holds the last page number
            last = html.xpath("//div[@id='_container_copyrightWorks']//ul[@class='pagination']/li[last()-1]//text()")[0]
            last = re.sub('[\.]', '', str(last))  # strip '...' ellipsis characters
            for pn in range(2, int(last) + 1):
                results = self.get_more(url, gszch, results, 'copyrightWorks', 10, pn,
                                        'id', SYSID, 'table -sort', self.parse_copyrightWorks)
        return results

    def parse_copyrightWorks(self, gszch, trs, copyrightWorks_list):
        '''Append one works-copyright record per row:
        [regno, work name, reg no, category, creation date, reg date, first-publication date].'''
        for row in trs:
            zpmc = row.xpath("./td[2]//text()")[0]    # 作品名称 work name
            djh = row.xpath("./td[3]//text()")[0]     # 登记号 registration number
            lb = row.xpath("./td[4]//text()")[0]      # 类别 category
            czwcrq = row.xpath("./td[5]//text()")[0]  # 创作完成日期 creation completed
            djrq = row.xpath("./td[6]//text()")[0]    # 登记日期 registration date
            scwcrq = row.xpath("./td[7]//text()")[0]  # 首次完成日期 first completed
            copyrightWorks_list.append([gszch, zpmc, djh, lb, czwcrq, djrq, scwcrq])
        return copyrightWorks_list

    def get_more(self, url, gszch, list_, content, num, pn, nd, cn_id, table, parse):
        '''获取更多 — fetch one extra pagination page and run the given row parser.

        :param content: pagination endpoint name (e.g. 'patent', 'wechat')
        :param num: page size; :param pn: page number
        :param nd: query-key name ('id'); :param cn_id: its value (company SYSID)
        :param table: CSS class of the result table; :param parse: row-parser callable
        '''
        timestamp = int(round(time.time() * 1000))
        url_ = f'https://www.tianyancha.com/pagination/{content}.xhtml?ps={num}&pn={pn}&{nd}={cn_id}&_={timestamp}'
        headers = {'Accept': '*/*',
                   'Connection': 'keep-alive',
                   'Host': 'www.tianyancha.com',
                   'Referer': f'{url}',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
        # request_max wraps requests.get with retry handling
        response = self.request_max(url_, headers)
        page = etree.HTML(response.text)
        rows = page.xpath(f"//table[@class='{table}']/tbody/tr")
        return parse(gszch, rows, list_)


def get_cookies_decode_to_dict():
    '''Load browser cookies exported as JSON into a name->value dict.

    Reads ``cookies.txt`` from the current working directory; returns None
    when the file does not exist.

    Fixes over the old version:
    * the path was built with a hard-coded Windows backslash
      (``os.getcwd() + '\\cookies.txt'``) — now ``os.path.join`` so it works
      on any OS;
    * ``f.read().encode("utf-8-sig")`` *added* a BOM to the data instead of
      stripping one; opening with the ``utf-8-sig`` codec strips an optional
      BOM, which was clearly the intent.
    '''
    path = os.path.join(os.getcwd(), 'cookies.txt')
    if not os.path.exists(path):
        return None
    with open(path, 'r', encoding='utf-8-sig') as f:
        cookies = json.load(f)
    return {cookie['name']: cookie['value'] for cookie in cookies}


if __name__ == '__main__':
    # Driver: read target companies from MySQL and run the spider for each.
    cookies = get_cookies_decode_to_dict()
    read = ReadMySQL()
    data = read.read_data()
    for value in data:
        # prefer the registration number (value[0]); fall back to the name (value[1])
        key = value[0]
        if key == '-' or key is None:
            key = value[1]
        s = Spider(cookies, value=key)
        a, b = s.run()

        if b == '没有工商注册号':
            # no registration number: skip this company
            continue
        if b in ('cookies过期请重新登录', '需要手工登录网页进行验证'):
            # session invalid (expired cookies or captcha): stop the whole run
            break
        if b == '存入数据库有误':
            print(f'{a} 存入数据库有误')
            break
        if a is not None:  # was `a != None` — identity check is correct for None
            # some other per-company failure: skip and move on
            continue
        cnt = read.read_already_update()[0][0]
        print(cnt)
        print(f"{value[1]}  采集成功,已采集{cnt}条数据")




