# -*- coding:utf-8 -*-
import ast
import asyncio
import json
import logging
import os
import re
import time
from urllib import parse
from urllib.request import quote

import requests
import xlwt
from lxml import etree
from openpyxl import Workbook
from openpyxl import load_workbook

from save_mysql import *



class Spider(object):
    """Scrape one company's profile, key staff and dishonest-person records
    from tianyancha.com and persist them through the save_mysql helpers.

    ``run()`` is the single entry point; it returns ``(error_args, message)``
    where both are ``None`` on success and ``message`` is a Chinese status
    string the caller dispatches on.
    """

    def __init__(self, cookies, value):
        self.cookies = cookies  # cookie dict exported from a logged-in browser session
        self.value = value      # company name (or credit code) to search for
        # NOTE(review): hard-coded public HTTP proxy — likely stale; confirm it is still needed.
        self.proxy = {'http': '120.83.120.72:9999'}

    def run(self):
        '''Main driver: search, scrape detail page, persist, return (args, status).'''
        mysql = SaveMySQL()
        keyword = quote(self.value)  # percent-encode the (Chinese) search keyword
        url = 'https://www.tianyancha.com/search?key=' + keyword
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36',
            'Accept-Encoding': 'gzip, deflate, br',
            # BUG FIX: was "'Accept - Language': 'zh - CN, zh;q = 0.9'" — spaces
            # inside an HTTP header field name are invalid (RFC 7230).
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Referer': f'https://www.tianyancha.com/search?key={keyword}',
            'Host': 'www.tianyancha.com'}
        time.sleep(2)  # crude rate limiting to reduce anti-robot challenges
        response = requests.get(url, headers=headers, cookies=self.cookies, proxies=self.proxy)
        now_url = response.url
        print(now_url)
        # A redirect to the login page means the session cookies have expired.
        if 'https://www.tianyancha.com/login' in now_url:
            print('cookies过期请重新登录')
            return None, 'cookies过期请重新登录'
        re_rule = re.compile(r'https://www\.tianyancha\.com/company/\d*')
        try:
            url = re_rule.findall(response.text)[0]  # first match = company detail-page url
            print(url)
        except IndexError as e:
            # No company link in the response: we were served a captcha page.
            return e.args, '需要手工登录网页进行验证'
        SYSID = url.split(r'/')[-1]  # numeric company id from the url tail
        print(SYSID)
        time.sleep(2)
        response = requests.get(url, headers=headers, proxies=self.proxy, cookies=self.cookies)
        text = response.text
        html = etree.HTML(text)
        data_list = []
        try:
            # Basic profile plus the registration number / credit code / name keys.
            basic_info, gszch, tyxydm, company_name = self.get_basic_information(html, SYSID)
        except Exception as e:
            return e.args, '基本信息'
        if gszch == '-':
            return None, '没有工商注册号'
        print(f'基本信息:  {basic_info}')
        print('==' * 50)

        # Key-staff section (present only for some companies).
        if html.xpath("//div[@id='_container_staff']"):
            try:
                print('主要人员页面信息：', html.text)
                key_person_list = self.get_key_person(html, gszch)
            except Exception as e:
                return e.args, '主要人员'
            data_list.append(['主要人员', key_person_list])
            print(len(key_person_list))
            print(f'主要人员:  {key_person_list}')
            print('==' * 50)

        # Current dishonest-person records; otherwise fall back to historical ones.
        if html.xpath("//*[@id='_container_dishonest']"):
            try:
                dishonest_list = self.get_dishonest(html, gszch, text, url)
            except Exception as e:
                print(e.args)
                return e.args, '失信人'
            data_list.append(['失信人', dishonest_list])
            print(len(dishonest_list))
            print(f'失信人:  {dishonest_list}')
            print('==' * 50)
        else:
            if html.xpath("//*[@id='_container_pastDishonest']"):
                try:
                    dishonest_list = self.get_past_dishonest(html, gszch, text, url)
                except Exception as e:
                    return e.args, '历史失信人'
                data_list.append(['失信人', dishonest_list])
                print(len(dishonest_list))
                print(f'历史失信人:  {dishonest_list}')
                print('==' * 50)

        # Only persist when the scraped name matches the requested name exactly.
        if self.value == company_name:
            mysql.update_message('基本信息', basic_info)
            for data in data_list:
                mysql.delete_other(data[0], gszch)
                a, b = mysql.insert_other(data[0], data[1])
                # NOTE(review): assumes insert_other returns a string-like `a` — confirm.
                if '存入数据库有误' in a:
                    return a, b
            ReadMySQL().upload_data(self.value)
        time.sleep(2)
        return None, None

    def get_basic_information(self, html, SYSID):
        """Extract the company profile table.

        Returns ``(basic_info, gszch, tyxydm, company_name)`` where
        ``basic_info`` is a flat list in the fixed column order expected by
        ``SaveMySQL.update_message``.
        """
        # Shared xpath prefix of the registration-info table.
        table = "//table[@class='table -striped-col -border-top-none -breakall']"
        try:
            net = html.xpath("//*[@id='company_web_top']/div[2]/div[3]/div[3]/div[2]/div[1]/a/@href")[0]  # website
        except IndexError:
            net = '暂无信息'
        company_name = html.xpath("//h1[@class='name']/text()")[0]  # company name
        try:
            intro = html.xpath("//*[@id='company_web_top']/div[2]/div[3]/div[3]/div[3]/div/div/text()")[0]  # introduction
        except IndexError:
            intro = '暂无信息'
        try:
            tel = html.xpath("//span[@class='hidden']/text()")[0]  # telephone
            tel = re.sub(r'[\[\]"]', '', str(tel))  # strip JSON-ish brackets/quotes
        except IndexError:
            tel = '暂无信息'
        email = html.xpath("//*[@id='company_web_top']/div[2]/div[3]/div[3]/div[1]/div[2]/span[2]/text()")[0]  # e-mail
        address = html.xpath("//*[@id='company_web_top']/div[2]/div[3]/div[3]/div[2]/div[2]/div/div/text()")[0]  # address
        gszch = html.xpath(table + "//tr[3]/td[4]/text()")[0]    # business registration number
        zzjgdm = html.xpath(table + "//tr[4]/td[4]/text()")[0]   # organization code
        tyxydm = html.xpath(table + "//tr[3]/td[2]/text()")[0]   # unified social credit code
        gslx = html.xpath(table + "//tr[5]/td[2]/text()")[0]     # company type
        nsrsbh = html.xpath(table + "//tr[4]/td[2]/text()")[0]   # taxpayer id
        hy = html.xpath(table + "//tr[5]/td[4]/text()")[0]       # industry
        yyqx = html.xpath(table + "//tr[7]/td[2]//text()")[0]    # business term
        djjg = html.xpath(table + "//tr[6]/td[4]//text()")[0]    # registration authority
        zcdz = html.xpath(table + "//tr[10]/td[2]//text()")[0]   # registered address
        ywmc = html.xpath(table + "//tr[9]/td[4]//text()")[0]    # English name
        jyfw = html.xpath(table + "//tr[11]/td[2]//text()")[0]   # business scope
        zczb = html.xpath(table + "//tr[1]/td[2]//text()")[0]    # registered capital
        zcsj = html.xpath(table + "//tr[2]/td[2]//text()")[0]    # registration date
        hzsj = html.xpath(table + "//tr[6]/td[2]//text()")[0]    # approval date
        qyzt = html.xpath(table + "//tr[2]/td[4]//text()")[0]    # company status
        nszz = html.xpath(table + "//tr[7]/td[4]//text()")[0]    # tax qualification
        rygm = html.xpath(table + "//tr[8]/td[2]//text()")[0]    # staff size
        sjzb = html.xpath(table + "//tr[1]/td[4]//text()")[0]    # paid-in capital
        cbrs = html.xpath(table + "//tr[8]/td[4]//text()")[0]    # insured headcount
        if zczb == '-':
            zczb = ''  # placeholder dash means "no registered capital"
        fr = html.xpath("//div[@class='name']//text()")[0]       # legal representative
        # Fixed column order expected by the DB layer — do not reorder.
        basic_info = [
            SYSID, tel, email, net, address, intro, gszch, zzjgdm, tyxydm,
            gslx, nsrsbh, hy, yyqx, djjg, zcdz, ywmc, jyfw, company_name,
            fr, zczb, zcsj, hzsj, qyzt, nszz, rygm, sjzb, cbrs,
        ]
        return basic_info, gszch, tyxydm, company_name

    def get_key_person(self, html, gszch):
        """Return [[gszch, post, name, note], ...] for the key-staff table."""
        key_person_list = []
        trs = html.xpath("//*[@id='_container_staff']/div/table/tbody/tr")
        for tr in trs:
            data = []
            zw = tr.xpath('./td/span//text()')  # post / title
            zw = re.sub(r'[\]\[\'\s+，]', '', str(zw))  # collapse the list repr into plain text
            name = tr.xpath(".//a[@class='link-click']/text()")[0]  # person name
            bz = tr.xpath(".//a[@class=' link-vip tips-toco']//text()")  # note / remark
            bz = re.sub(r'[\[\],\'\s+]', '', str(bz))
            data.append(gszch)
            data.append(zw)
            data.append(name)
            data.append(bz)
            key_person_list.append(data)
        return key_person_list

    def request_max(self, url, headers):
        """GET *url* with aggressive retries on a throwaway session.

        NOTE(review): mutating ``requests.adapters.DEFAULT_RETRIES`` is a
        process-wide side effect that affects every later request.
        """
        requests.adapters.DEFAULT_RETRIES = 15  # raise the retry limit
        s = requests.session()
        s.keep_alive = False  # drop idle connections
        response = s.get(url, headers=headers, cookies=self.cookies)
        return response

    def get_dishonest(self, html, gszch, text, url):
        """Parse current dishonest-person rows (plus historical ones if present)."""
        dishonest_list = []
        trs = html.xpath("//*[@id='_container_dishonest']/table/tbody/tr")
        dishonest_list = self.parse_dishonest(gszch, text, url, trs, dishonest_list)
        if html.xpath("//*[@id='_container_pastDishonest']"):
            trs = html.xpath("//*[@id='_container_pastDishonest']/table/tbody/tr")
            dishonest_list = self.parse_dishonest(gszch, text, url, trs, dishonest_list)
        return dishonest_list

    def get_past_dishonest(self, html, gszch, text, url):
        """Parse only the historical dishonest-person rows."""
        dishonest_list = []
        trs = html.xpath("//*[@id='_container_pastDishonest']/table/tbody/tr")
        dishonest_list = self.parse_dishonest(gszch, text, url, trs, dishonest_list)
        return dishonest_list

    def parse_dishonest(self, gszch, text, url, trs, dishonest_list):
        '''Extract one record per table row and enrich it via the detail API.'''
        cnt = 1  # 1-based row counter used to locate each row in the raw HTML
        text = text.replace(' ', '')  # the regex below assumes space-free markup
        for tr in trs:
            data = []
            fbrq = tr.xpath("./td[2]//text()")[0]   # publication date
            larq = tr.xpath("./td[3]//text()")[0]   # filing date
            ah = tr.xpath("./td[4]/text()")[0]      # case number
            zxfy = tr.xpath("./td[5]/text()")[0]    # enforcement court
            lxzt = tr.xpath("./td[6]/text()")[0]    # performance status
            zxyjwa = tr.xpath("./td[7]/text()")[0]  # enforcement basis document
            # The onclick payload is a Python-literal-style tuple embedded in the page.
            pattern = '''<tr><td>%d</td><td><span>.*?<spanclass="link-click"onclick='openDishonestinfoDetail(.*?)'>详情</span>''' % (
                cnt)
            x = re.findall(pattern, text)
            # SECURITY FIX: was eval() on scraped page content — literal_eval
            # only parses literals and cannot execute code.
            x = ast.literal_eval(x[0])
            did = x[0]  # dishonest-record id used by the detail API
            xq, cardno, courtarea, disrupttypename = self.get_dishonest_details(url, did)
            data.append(gszch)            # business registration number
            data.append(fbrq)             # publication date
            data.append(larq)             # filing date
            data.append(ah)               # case number
            data.append(zxfy)             # enforcement court
            data.append(lxzt)             # performance status
            data.append(zxyjwa)           # enforcement basis document
            data.append(cardno)           # organization code
            data.append(courtarea)        # province
            data.append(disrupttypename)  # specific dishonest conduct
            data.append(xq)               # detail text (duty)
            dishonest_list.append(data)
            cnt += 1
        return dishonest_list

    def get_dishonest_details(self, url, did):
        '''Fetch one dishonest-person record from the capi JSON endpoint.'''
        _url = f'https://capi.tianyancha.com/cloud-newdim/company/getDishonestinfoDetail?did={did}&_={int(round(time.time() * 1000))}'
        # NOTE(review): hard-coded X-AUTH-TOKEN has an embedded expiry — it will
        # eventually stop working and should come from configuration.
        headers = {'Origin': 'https://www.tianyancha.com',
                   'Referer': url,
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
                   'version': 'TYC-Web',
                   'X-AUTH-TOKEN': 'eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxODk4MTkzNjA0NyIsImlhdCI6MTU3Mjk0MjE4NSwiZXhwIjoxNjA0NDc4MTg1fQ.7b9aYM5Hnp2FYLMOktGkL5gzLgAwpa-vgg3I4mw6FohfS81kvS8q5lcWFZwthi-ZSO_A1kjFpixul70ZOVO7nQ'
                   }
        response = requests.get(_url, headers=headers, cookies=self.cookies, timeout=10)
        data_all = json.loads(response.text)
        cardno = data_all['data']['cardnum']                  # organization code
        courtarea = data_all['data']['areaname']              # province
        disrupttypename = data_all['data']['disrupttypename'] # specific dishonest conduct
        xq = data_all.get('data').get('duty')                 # detail text
        xq = re.sub(r'[\s+]', '', xq)                         # strip whitespace (and '+')
        # Normalize missing fields to the site's '-' placeholder.
        if cardno is None:
            cardno = '-'
        if courtarea is None:
            courtarea = '-'
        if disrupttypename is None:
            disrupttypename = '-'
        return xq, cardno, courtarea, disrupttypename

# Export dishonest-person records to an Excel (.xls) file.
def save_excel(title, fileds, data_list):
    """Write header rows *fileds* then the rows of *data_list* to an .xls file.

    The workbook is saved as ``./data<title><YYYY-MM-DD>.xls`` in the current
    directory.
    """
    book = xlwt.Workbook()  # in-memory workbook
    # cell_overwrite_ok allows re-writing a cell without xlwt raising.
    sheet = book.add_sheet('sheet1', cell_overwrite_ok=True)
    # Header rows (fileds is a list of rows of captions).
    for row_idx, header_row in enumerate(fileds):
        for col_idx, caption in enumerate(header_row):
            sheet.write(row_idx, col_idx, caption)
    # Data rows always start on row 1, directly under the (single) header row.
    for row_idx, record in enumerate(data_list, start=1):
        for col_idx, cell in enumerate(record):
            sheet.write(row_idx, col_idx, cell)
    book.save("./data" + title + time.strftime("%Y-%m-%d", time.localtime()) + ".xls")


def get_cookies_decode_to_dict():
    """Load browser-exported cookies from ``cookies.txt`` in the working directory.

    Returns:
        dict mapping cookie name -> value, or None when the file is absent.
    """
    # os.path.join is portable; the original hard-coded the Windows '\\' separator,
    # which breaks on any other platform.
    path = os.path.join(os.getcwd(), 'cookies.txt')
    if not os.path.exists(path):
        return None
    # utf-8-sig transparently strips a UTF-8 BOM if present (the original
    # achieved this indirectly by re-encoding the text and letting json
    # re-detect the encoding).
    with open(path, 'r', encoding='utf-8-sig') as f:
        cookies = json.load(f)
    return {cookie['name']: cookie['value'] for cookie in cookies}


def to_list(res):
    """Convert a sequence of rows (e.g. DB result tuples) into a list of lists.

    The original mutated the list in place via ``b[b.index(c)]``, which is
    O(n^2) and looks rows up by value — fragile when rows repeat. A
    comprehension is linear and unambiguous.
    """
    return [list(row) for row in res]

if __name__ == '__main__':
    # Cookies exported from a logged-in browser session (None if missing).
    cookies = get_cookies_decode_to_dict()
    read = ReadMySQL()
    data = read.read_data()
    for value in data:
        key = value[1]  # company-name column of the result row
        print(key)
        s = Spider(cookies, value=key)
        a, b = s.run()

        # Dispatch on the status string returned by Spider.run().
        if b == '没有工商注册号':
            # No registration number: skip this company.
            continue
        if b == 'cookies过期请重新登录':
            # Session expired: stop the whole run.
            break
        if b == '需要手工登录网页进行验证':
            # Captcha challenge: stop and ask for manual verification.
            break
        if b == '存入数据库有误':
            print(f'{a} 存入数据库有误')
            break
        if a is not None:
            # Any other scrape error: skip this company.
            continue
        cnt = read.read_already_update()[0][0]
        print(cnt)
        print(f"{value[1]}  采集成功,已采集{cnt}条数据")

    # Export the collected dishonest-person records to Excel.
    res = read.query_data()
    rows = to_list(res)
    fileds = [["name", "iname", "shixin", "casecode", "province", "publishdate", "regdate", "businessentity", "disrupttype name",
               "cardnum", "courtname", "courtarea", "sexy", "gistunit", "performance", "duty", "gistId"]]  # column captions
    title = '失信人'
    save_excel(title, fileds, rows)
    print("导出采集数据成功！")


