# -*- coding: utf-8 -*-
import os
import xlrd
import time
import json
import socket
import random
import requests
import traceback
import util.LogUtil as log
from bs4 import BeautifulSoup

'''
  1.当前脚本使用天眼查，https://www.tianyancha.com
  2.根据【公司名称】搜索，得到符合要求的结果页
  3.由于频繁请求会出现验证码或接口报错，需要注册一个天眼查账号更换Cookie或手动处理图片验证码
  4.修改Phone_list里面的手机号和Cookie_list里面的Cookie，即可使用，如果爬取数据量较大，需要配置代理IP，即proxies
'''

# Initialise the logger (note: this rebinds the imported util.LogUtil module name).
log = log.TNLog()

# Account owner label -> registered Tianyancha phone number.
# Add entries here to rotate over more accounts when a Cookie gets blocked.
Phone_list = {
    '测试手机号1': '18694925428',
    '测试手机号2': '18694925438',
}
# Phone number -> logged-in session Cookie for that account.
# These expire periodically and must be refreshed from a real browser session.
Cookie_list = {
    '18694925428':
        'TYCID=26c830b0928d11eb83854144d5d8c7b8; ssuid=2962966688; _ga=GA1.2.1822783230.1617242060; jsid=https%3A%2F%2Fwww.tianyancha.com%2F%3Fjsid%3DSEM-BAIDU-PZ-SY-2021112-JRGW; _bl_uid=4Lkgdva870kgFO1j5itXtFh9j7wF; creditGuide=1; bad_id658cce70-d9dc-11e9-96c6-833900356dc6=8496bb51-3605-11ec-be01-ffdbb935e7d9; tyc-user-phone=%255B%252218694925428%2522%255D; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2218694925428%22%2C%22first_id%22%3A%221788b236d0121-0f62fc0172c02e-383e550a-2073600-1788b236d024e0%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22%24device_id%22%3A%221788b236d0121-0f62fc0172c02e-383e550a-2073600-1788b236d024e0%22%7D; _gid=GA1.2.1376446731.1635730705; bad_id71a50b50-56e6-11ea-a2b2-3587b4aee6bb=9912cb22-3b9e-11ec-a365-5b7375bec0a5; tyc-user-info={%22state%22:%220%22%2C%22vipManager%22:%220%22%2C%22mobile%22:%2218694925428%22}; auth_token=eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxODY5NDkyNTQyOSIsImlhdCI6MTYzNTg0NDgxOCwiZXhwIjoxNjY3MzgwODE4fQ.lwnKR62WT5suI8D-0NqzaQ3C73YKTtEtlhsKMH0uwazcl57o0-5dfmCDQkyzYHPVuTQUMszKvL7ww35AOUZExA; tyc-user-info-save-time=1635844848012; aliyungf_tc=58a8cedf6a1fd676874c8fae26cb3af5c08d2546db231a4ef6cb5bccc31c51d6; csrfToken=8Lq5ckTKEKEmsG19Tedkwx0D; relatedHumanSearchGraphId=141289410; relatedHumanSearchGraphId.sig=qwHvpSU9ydBKHD0rRhI6gLvqEkLwgmDht2nWDYRXpVw; bannerFlag=true; Hm_lvt_e92c8d65d92d534b0fc290df538b4758=1635730705,1635817501,1635838103,1635903507; acw_tc=781bad4816359187146248060e66c5bd2e43ddead6de75a0f00b46f0fb66da; Hm_lpvt_e92c8d65d92d534b0fc290df538b4758=1635918792; _gat_gtag_UA_123487620_1=1; searchSessionId=1635918799.93700848',
    '18694925438':
        'TYCID=26c830b0928d11eb83854144d5d8c7b8; ssuid=2962966688; _ga=GA1.2.1822783230.1617242060; jsid=https%3A%2F%2Fwww.tianyancha.com%2F%3Fjsid%3DSEM-BAIDU-PZ-SY-2021112-JRGW; _bl_uid=4Lkgdva870kgFO1j5itXtFh9j7wF; creditGuide=1; bad_id658cce70-d9dc-11e9-96c6-833900356dc6=8496bb51-3605-11ec-be01-ffdbb935e7d9; tyc-user-phone=%255B%252218694925428%2522%255D; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2218694925428%22%2C%22first_id%22%3A%221788b236d0121-0f62fc0172c02e-383e550a-2073600-1788b236d024e0%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22%24device_id%22%3A%221788b236d0121-0f62fc0172c02e-383e550a-2073600-1788b236d024e0%22%7D; _gid=GA1.2.1376446731.1635730705; bad_id71a50b50-56e6-11ea-a2b2-3587b4aee6bb=9912cb22-3b9e-11ec-a365-5b7375bec0a5; tyc-user-info={%22state%22:%220%22%2C%22vipManager%22:%220%22%2C%22mobile%22:%2218694925428%22}; auth_token=eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxODY5NDkyNTQyOSIsImlhdCI6MTYzNTg0NDgxOCwiZXhwIjoxNjY3MzgwODE4fQ.lwnKR62WT5suI8D-0NqzaQ3C73YKTtEtlhsKMH0uwazcl57o0-5dfmCDQkyzYHPVuTQUMszKvL7ww35AOUZExA; tyc-user-info-save-time=1635844848012; aliyungf_tc=58a8cedf6a1fd676874c8fae26cb3af5c08d2546db231a4ef6cb5bccc31c51d6; csrfToken=8Lq5ckTKEKEmsG19Tedkwx0D; relatedHumanSearchGraphId=141289410; relatedHumanSearchGraphId.sig=qwHvpSU9ydBKHD0rRhI6gLvqEkLwgmDht2nWDYRXpVw; bannerFlag=true; Hm_lvt_e92c8d65d92d534b0fc290df538b4758=1635730705,1635817501,1635838103,1635903507; acw_tc=781bad4816359187146248060e66c5bd2e43ddead6de75a0f00b46f0fb66da; Hm_lpvt_e92c8d65d92d534b0fc290df538b4758=1635918792; _gat_gtag_UA_123487620_1=1; searchSessionId=1635918799.93700848'
}

# Fetch a proxy address from the local proxy-pool service.
def get_proxy():
    """Return one proxy address obtained from the local pool at 127.0.0.1:5010."""
    response = requests.get("http://127.0.0.1:5010/get/")
    proxy_addr = response.json().get("proxy")
    # Short pause so rapid successive calls do not hammer the pool service.
    time.sleep(2)
    log.info(proxy_addr)
    return proxy_addr

# Remove a proxy from the local proxy-pool service.
def delete_proxy(proxy):
    """Ask the local pool service to discard *proxy* (fire-and-forget)."""
    removal_url = "http://127.0.0.1:5010/delete/?proxy={}".format(proxy)
    requests.get(removal_url)

# Generate a random phone number.
def get_phone():
    """Return a random 11-digit phone string: a common carrier prefix
    followed by 8 distinct digits (random.sample guarantees uniqueness)."""
    prefix = random.choice(['135', '136', '138'])
    suffix = ''.join(random.sample('0123456789', 8))
    return prefix + suffix

# Read search keys from an Excel workbook.
def read_excel_data(filepath, sheet):
    """Return the column-0 value of every row on the given sheet.

    filepath -- path of the .xls workbook
    sheet    -- 0-based sheet index
    """
    workbook = xlrd.open_workbook(filepath)
    worksheet = workbook.sheet_by_index(sheet)
    # Only the first column (the company names) is of interest.
    return [worksheet.row_values(row_idx)[0] for row_idx in range(worksheet.nrows)]

# Ensure the directory part of *path* exists; create it (and parents) if missing.
def isexists_dir_Create(path, index):
    """Create the directory given by ``path[:index]`` if it does not exist.

    path  -- full file path (directory + '/' + file name)
    index -- negative slice index that strips the trailing '/<file name>',
             leaving only the directory portion.

    exist_ok avoids the check-then-create race of the original
    ``os.path.exists`` + ``os.makedirs`` pair and is a no-op when the
    directory is already present.
    """
    os.makedirs(path[:index], exist_ok=True)
        
# Core request helper: search Tianyancha, rotating accounts/Cookies on failure.
def get(url,index,key,type,state,proxy_state):
    """Fetch the search page for *key*, retrying with other accounts on failure.

    url         -- full request URL (search endpoint with the key appended)
    index       -- 0-based batch position of *key*, used only in log messages
    key         -- company name being searched
    type        -- record type label ('企业' or '供应商'), used only in log messages
    state       -- when > 0, log the details of a successful request
    proxy_state -- when > 0, route the request through the local proxy pool

    Returns the response body text, or '' when no request ever completed.
    """
    name = ''
    phone = ''
    random_phone = ''
    max_retry = 0
    retry_flag = False
    r = None  # last HTTP response; stays None if no request went through at all

    # Up to 3 full passes over the configured accounts.
    while max_retry < 3 and not retry_flag:
        # Walk the account list so a blocked Cookie is replaced automatically
        # by the next account's Cookie.
        Phone_id = 0
        while Phone_id < len(Phone_list):
            try:
                # Resolve this account's owner label, phone number and Cookie.
                name = list(Phone_list.keys())[Phone_id]
                phone = list(Phone_list.values())[Phone_id]
                Cookie = Cookie_list[phone]
                # Random phone number substituted into the Cookie so each
                # request looks like a different visitor.
                random_phone = get_phone()
                # Masquerade as a regular browser session.
                header = {
                    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
                    'Host':'www.tianyancha.com',
                    'Cookie': Cookie.replace(phone,random_phone),
                }
                # Query-string data for the search endpoint.
                parser = {
                    'scheme':'https',
                    'host':'www.tianyancha.com',
                    'filename':'/search',
                    'key':str(key)
                }
                if proxy_state>0:
                    # Route the request through a proxy from the local pool.
                    proxy = get_proxy()
                    proxies = {"http": "http://{}".format(proxy)}
                    r = requests.get(url,headers=header,params=parser,proxies=proxies,stream=True,allow_redirects=False)
                    log.info('当前主机名称为: ' + socket.gethostname())
                    log.info('当前主机地址为: ' + socket.gethostbyname(socket.gethostname()))
                    log.info('当前使用代理为: ' + str(proxies))
                    # delete_proxy(proxy)
                else:
                    r = requests.get(url,headers=header,params=parser,allow_redirects=False)
                # Fix: the original assigned the typo 'uft-8' here, leaving the
                # response decoded with requests' guessed encoding.
                r.encoding = 'utf-8'
                # Fix: type_name was unbound for labels other than the two known
                # ones, which crashed the success-logging below with NameError.
                type_name = type if type in ('企业', '供应商') else str(type)
                if r.status_code == 200:
                    if state > 0:
                        log.info('<------------------------ 第['+str(max_retry+1)+']次开始获取第['+str(index+1)+']家【'+str(key)+'】'+type_name+'信息------------------------>')
                        log.info('账户姓名：' + name)
                        log.info('原始账户：' + phone)
                        log.info('随机账号：' + random_phone)
                        log.info('请求地址：' + url)
                        log.info('请求状态：' + str(r.status_code))
                        log.info('获取状态: 成功')
                    retry_flag = True
                    break
                else:
                    log.info('<------------------------ 第['+str(max_retry+1)+']次开始获取第['+str(index+1)+']家【'+str(key)+'】'+name+'信息------------------------>')
                    log.info('账户姓名：' + name)
                    log.info('原始账户：' + phone)
                    log.info('随机账号：' + random_phone)
                    log.info('请求地址：' + url)
                    log.error('请求状态：' + str(r.status_code))
                    log.error('错误代码：' + str(r.status_code)+ ' 请求过于频繁，登录Cookie已失效或出现验证码效验')
                    log.error('获取状态: 失败')
                    log.error('解决方案：登录个人的账户，获取Cookie进行替换或手动进行验证码效验')
                    # Move on to the next account's Cookie.
                    Phone_id += 1
                    # All Cookies failed: give the operator 30s to solve the
                    # captcha manually before the next full retry pass.
                    if Phone_id == len(Phone_list):
                        log.error('解决方案：当前所有Cookie都失效，请手动在30秒内进行验证码效验操作')
                        log.error('<---- 请手动打开网页【 '+ url +' 】 进行验证码效验 ---->')
                        second = 30
                        while second > 0:
                            time.sleep(1)
                            log.error('正在倒计时：'+str(second)+'秒')
                            second -= 1
                        retry_flag = False
            except Exception:
                log.error(traceback.format_exc())
                # Fix: advance to the next account. The original did not
                # increment here, so any exception made this loop spin forever
                # on the same account.
                Phone_id += 1
        max_retry += 1
    # Fix: the original unconditionally returned r.text, raising NameError
    # when every attempt had failed before a response was received.
    return r.text if r is not None else ''

def getlist(url,key,state,proxy_state):
    """Fetch a Tianyancha search page for *key* using a fixed built-in Cookie.

    url         -- request URL for the search page
    key         -- company name being searched
    state       -- when > 0, log a banner line before the request
    proxy_state -- when > 0, route the request through the local proxy pool

    Returns the response body text, or '' when the request raised.
    """
    try:
        # Hard-coded session Cookie for this helper (refresh manually on expiry).
        Cookie = 'aliyungf_tc=141426572a1bda565b523e8bd64f5ed737819963293894fcaa6fe1ec430e4f15; csrfToken=hZ_OCmvm1gSEznJruHgUj6RL; TYCID=8167fbb0389911ec9a4c8948b4f7b33d; ssuid=6783359037; bannerFlag=true; Hm_lvt_e92c8d65d92d534b0fc290df538b4758=1635499262; _ga=GA1.2.1204495829.1635499262; tyc-user-info={%22state%22:%220%22%2C%22vipManager%22:%220%22%2C%22mobile%22:%2216675162836%22}; tyc-user-info-save-time=1635499326314; auth_token=eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxNjY3NTE2MjgzNiIsImlhdCI6MTYzNTQ5OTMyNSwiZXhwIjoxNjY3MDM1MzI1fQ.AxPrfL0wQVhhHJheu4yIcXF2l-Uipkcr1YqbIU5wvO04DYqwmhh9RhPcqrIIYU3iePjoditGWNqfJubj6ynA1g; tyc-user-phone=%255B%252216675162836%2522%255D; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2216675162836%22%2C%22first_id%22%3A%2217ccb5a3e06674-051ffd656762e7-1c3a6657-1024000-17ccb5a3e07b26%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E5%A4%A9%E7%9C%BC%E6%9F%A5%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Fother.php%22%7D%2C%22%24device_id%22%3A%2217ccb5a3e06674-051ffd656762e7-1c3a6657-1024000-17ccb5a3e07b26%22%7D; _gid=GA1.2.1116475126.1635819963; relatedHumanSearchGraphId=2318455639; relatedHumanSearchGraphId.sig=ZoL_WnSJ41CUW8dstoXUDqaEEl_cVtXEKyXJPdKRg1c; jsid=https%3A%2F%2Fwww.tianyancha.com%2F%3Fjsid%3DSEM-BAIDU-PP-TYC-100001%26bd_vid%3D8446961105861732721%26userid%3D31769301%26query%3D%25B2%25E9%25D1%25AF%25CC%25EC%25D1%25DB%25C6%25F3%25D2%25B5%26keywordid%3D293703286755%26campaignid%3D158356455%26groupid%3D5994400039%26renqun_youhua%3D2828757; acw_tc=781bad3916358407819608109e1d178f07b33e32a6c78da290a1999a249421; acw_sc__v2=6180f30d0defe3255b2f7e271845f41f260c43e9; Hm_lpvt_e92c8d65d92d534b0fc290df538b4758=1635840795; _gat_gtag_UA_123487620_1=1; searchSessionId=1635840803.77173982'
        # Masquerade as a regular browser session.
        header = {
            'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
            'Host':'www.tianyancha.com',
            'Cookie':Cookie,
        }
        # Query-string data for the search endpoint.
        parser = {
            'scheme':'https',
            'host':'www.tianyancha.com',
            'filename':'/search',
            'key':str(key),}
        if state>0:
            log.info('<------------------------ 开始获取【'+str(key)+'】 企业信息------------------------>')

        if proxy_state>0:
            # Route the request through a proxy from the local pool, then
            # drop the proxy so it is not reused.
            proxy = get_proxy()
            proxies = {"http": "http://{}".format(proxy)}
            r = requests.get(url,headers=header,params=parser,proxies=proxies,stream=True,allow_redirects=False)
            log.info('当前主机名称为: ' + socket.gethostname())
            log.info('当前主机地址为: ' + socket.gethostbyname(socket.gethostname()))
            log.info('当前使用代理为: ' + str(proxies))
            delete_proxy(proxy)
        else:
            r = requests.get(url,headers=header,params=parser,allow_redirects=False)
        # Fix: the original assigned the typo 'uft-8' here.
        r.encoding = 'utf-8'
        if r.status_code == 200:
            log.info('请求地址：' + url)
            log.info('请求状态：' + str(r.status_code))
            log.info('获取状态: 成功')
        else:
            log.info('请求地址：' + url)
            log.info('请求状态：' + str(r.status_code))
            log.info('错误代码：' + str(r.status_code)+ ' 请求过于频繁，登录Cookie已失效或出现验证码效验')
            log.info('获取状态: 失败')
        return r.text
    except Exception:
        # Log the full traceback (consistent with the other helpers) and
        # return '' instead of the original implicit None, which crashed
        # callers that feed the result straight into BeautifulSoup.
        log.error(traceback.format_exc())
        return ''

# ---------------------------------------------------------------------------------------
def get_company_info(html,key,write_state,proxy_state):
    """Parse a Tianyancha search-result page and return the first hit's details.

    html        -- raw HTML of a search-result page
    key         -- company name that was searched (used in the error marker)
    write_state -- when > 0, also dump the result as JSON to result/txt/company_info.txt
    proxy_state -- recorded verbatim in the JSON header

    Returns a dict keyed by the Chinese field names in ``company_info_key``;
    on any structural parse failure the ``error`` dict is returned instead.
    """
    head = {
        "time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
        "proxy": proxy_state
    }
    company_info = {}
    all_company_info = []
    # Field names: company name, legal representative, registered capital,
    # founding date, contact phone, company id, detail-page URL.
    company_info_key = ['公司名称','公司法人','注册资本','成立日期','联系电话','公司编号','公司详情']
    company_info_value = ['','','','','','','']
    error = {'错误提示': '['+key+']该企业下找不到公司信息...'}
    try:
        start = time.time()
        soup = BeautifulSoup(html,'lxml')
        # Only the first search hit is taken.
        div = soup.select('div.result-list > div.search-item')[0]
        # NOTE(review): the loop variable shadows the *key* parameter; harmless
        # here because *error* was already built above, but worth renaming.
        for index,key in enumerate(company_info_key):
            try:
                if index == 0:
                    company_name = div.select('div.content > div.header')[0].contents[0].getText()
                    company_info_value[index] = company_name
                elif index == 1:
                    # The label node is checked first so that only the known
                    # field variants are scraped; otherwise the value stays ''.
                    company_faren = div.select('div.content > div.info')[0].contents[0].contents[0].getText()
                    if company_faren=='法定代表人：' or company_faren=='负责人：' or company_faren=='投资人：':
                        company_faren = div.select('div.content > div.info')[0].contents[0].contents[1].getText()
                        company_info_value[index] = company_faren
                elif index == 2:
                    company_ziben = div.select('div.content > div.info')[0].contents[1].contents[0].getText()
                    if company_ziben=='注册资本：' or company_ziben=='开办资金：' or company_ziben=='出资额：':
                        company_ziben = div.select('div.content > div.info')[0].contents[1].contents[1].getText()
                        company_info_value[index] = company_ziben
                elif index == 3:
                    company_cdate = div.select('div.content > div.info')[0].contents[2].contents[0].getText()
                    if company_cdate=='成立日期：':
                        company_cdate = div.select('div.content > div.info')[0].contents[2].contents[1].getText()
                        company_info_value[index] = company_cdate
                elif index == 4:
                    company_phone = div.select('div.content > div.contact')[0].contents[0].contents[0].getText()
                    if company_phone=='电话：':
                        company_phone = div.select('div.content > div.contact')[0].contents[0].contents[1].getText()
                        company_info_value[index] = company_phone
                elif index == 5:
                    company_id = div.select('.search-result-single')[0].attrs['data-id']
                    company_info_value[index] = company_id
                elif index == 6:
                    company_href = div.select('div.content > div.header')[0].contents[0].attrs['href']
                    company_info_value[index] = company_href
            except Exception as e:
                # A missing DOM node only skips this one field.
                # log.error(e)
                log.error(traceback.format_exc())
                continue

        # Zip field names and values into one dict,
        # e.g. {'公司名称': '海尔智家股份有限公司'}.
        company_info = dict(zip(company_info_key,company_info_value))
        # log.info(company_info)

        all_company_info.append(company_info)

        head['results'] = all_company_info
        # Serialise to pretty-printed JSON, keeping non-ASCII readable.
        company_info_json = json.dumps(head, ensure_ascii=False,sort_keys=False, indent=4, separators=(',', ': '))
        log.info(company_info_json)

        if write_state>0:
            name1 = os.getcwd()
            path = ''+name1+'/result/txt/'
            # -17 strips '/company_info.txt' to obtain the directory part.
            index = -17
            name = 'company_info.txt'
            isexists_dir_Create(path+name,index)
            # Write the JSON dump ('w+' truncates any previous file).
            with open (path+name,'w+',encoding='utf-8') as f:
                f.write(company_info_json + '\n')    
                # NOTE(review): redundant — the with-statement already closes f.
                f.close()
            # Record the end time.
            end = time.time()
            # Log the elapsed time.
            log.info('单个公司信息写入完毕,用时{:.2f}秒'.format(end-start))
    except Exception as e:
        # Any structural parse failure falls back to the error marker.
        # log.error(e)
        log.error(traceback.format_exc())
        log.info(error['错误提示'])
        company_info = error

    return company_info

def get_odd_company_info(url,excel,sheet,type,state,write_state,proxy_state):
    """Search every company name listed in an Excel sheet and collect its info.

    url         -- search endpoint prefix; the key is appended per request
    excel       -- path of the Excel workbook holding the company names
    sheet       -- 0-based sheet index inside the workbook
    type        -- record type label forwarded to get() for logging
    state       -- forwarded to get(); when > 0 successful requests are logged
    write_state -- when > 0, dump everything to result/txt/company_info.txt
    proxy_state -- when > 0, requests go through the local proxy pool

    Returns the aggregated results serialised as a pretty-printed JSON string.
    """
    start = time.time()
    head = {
        "time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
        "proxy": proxy_state
    }
    all_company_info = []

    keys = read_excel_data(excel, sheet)
    for index, key in enumerate(keys):
        company_info = get_company_info(get(url + str(key), index, key, type, state, proxy_state), key, 0, proxy_state)
        all_company_info.append(company_info)

    head['results'] = all_company_info
    # Serialise to pretty-printed JSON, keeping non-ASCII readable.
    all_company_info_json = json.dumps(head, ensure_ascii=False, sort_keys=False, indent=4, separators=(',', ': '))
    log.info(all_company_info_json)

    if write_state > 0:
        name = 'company_info.txt'
        path = os.getcwd() + '/result/txt/'
        # Derive the slice that strips '/<name>' instead of hard-coding -17,
        # so renaming the output file cannot silently break the dir creation.
        isexists_dir_Create(path + name, -(len(name) + 1))
        # 'w+' truncates any previous result file; the with-statement closes
        # the handle (the original also called f.close() redundantly).
        with open(path + name, 'w+', encoding='utf-8') as f:
            f.write(all_company_info_json + '\n')
        end = time.time()
        log.info('多个公司信息写入完毕,用时{:.2f}秒'.format(end - start))
    return all_company_info_json

def get_id_supplier_info(html,index,type,write_state,proxy_state):
    """Extract a company's supplier list from its Tianyancha detail page.

    html        -- raw HTML of the company detail page
    index       -- batch position, forwarded to get() for log messages
    type        -- record type label forwarded to get()
    write_state -- when > 0, dump the result to result/txt/supplier_info.txt
    proxy_state -- when > 0, requests are routed through the proxy pool

    Returns a list of supplier-info dicts, or a list holding the error
    marker when no supplier section was found.
    """
    head = {
        "time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
        "proxy": proxy_state
    }
    company_info = {}
    supplier_info = []
    all_company_info = []
    url = 'https://www.tianyancha.com/search?key='
    error = {'错误提示': '该企业下找不到供应商信息...'}
    try:
        start = time.time()
        soup = BeautifulSoup(html,'lxml')
        # Company name as shown on the detail page.
        company_name = soup.select('.copy-it')[0].getText()
        # Re-search the company itself to build its own info record.
        company_info = get_company_info(get(url+str(company_name),index,company_name,'企业',1,proxy_state),company_name,0,proxy_state)
        divs = soup.select('div.product-info-card > div.block-data')[0]
        titles = divs.select('.partner-company-header')[0]
        # Header layout differs depending on whether the page carries both a
        # customer block and a supplier block, or only a supplier block.
        if len(titles) > 3:
            # Both customers and suppliers present.
            title = titles.contents[1].contents[0].getText()
            if title == '供应商':
                slider_brands = divs.select('.slider-brand')
                if len(slider_brands) > 1:
                    slider_brand = slider_brands[1]
                    position_rels = slider_brand.select('.position-rel')[0]
                else:
                    slider_brand = slider_brands[0]
                    position_rels = slider_brand.select('.position-rel')[0]
            else:
                # Empty string gives len() == 0, so the loop below is skipped.
                position_rels = ''
                log.info(error['错误提示'])
                supplier_info.append(error)
        else:
            # Only the supplier block present.
            title = titles.contents[0].contents[0].getText()
            if title == '供应商':
                slider_brands = divs.select('.slider-brand')
                if len(slider_brands) > 1:
                    slider_brand = slider_brands[1]
                    position_rels = slider_brand.select('.position-rel')[0]
                else:
                    slider_brand = slider_brands[0]
                    position_rels = slider_brand.select('.position-rel')[0]
            else:
                position_rels = ''
                log.info(error['错误提示'])
                supplier_info.append(error)

        log.info('名下供应商总数: '+ str(len(position_rels)))
        # NOTE(review): the loop variable shadows the *index* parameter.
        for index,supplier in enumerate(position_rels):
            supplier_name = supplier.select('.partner-companyname')[0].getText()
            # Search each supplier by name to build its info record.
            supplier_company_info = get_company_info(get(url+str(supplier_name),index,supplier_name,type,1,proxy_state),supplier_name,0,proxy_state)
            supplier_info.append(supplier_company_info)
        # log.info(supplier_info)

        company_info['suppliers'] = supplier_info
        all_company_info.append(company_info)

        head['results'] = all_company_info
        # Serialise to pretty-printed JSON, keeping non-ASCII readable.
        supplier_info_json = json.dumps(head, ensure_ascii=False,sort_keys=False, indent=4, separators=(',', ': '))
        log.info(supplier_info_json)
        
        if write_state>0:
            name1 = os.getcwd()
            path = ''+name1+'/result/txt/'
            # -18 strips '/supplier_info.txt' to obtain the directory part.
            index = -18
            name = 'supplier_info.txt'
            isexists_dir_Create(path+name,index)
            # Write the JSON dump ('w+' truncates any previous file).
            with open (path+name,'w+',encoding='utf-8') as f:
                f.write(supplier_info_json + '\n')

            # NOTE(review): redundant — the with-statement above already closed f.
            f.close()
            # Record the end time.
            end = time.time()
            # Log the elapsed time.
            log.info('单个供应商信息写入完毕,用时{:.2f}秒'.format(end-start))
    except Exception as e:
        # Any structural parse failure appends the error marker.
        # log.error(e)
        log.error(traceback.format_exc())
        log.info(error['错误提示'])
        supplier_info.append(error)
    return supplier_info

def get_company_supplier_info(htmls,type,write_state,proxy_state):
    """For each search-result page in *htmls*, parse the first company hit and
    then fetch that company's supplier list via its detail page.

    htmls       -- iterable of raw search-result HTML pages
    type        -- record type label forwarded to the request helpers
    write_state -- when > 0, dump everything to result/txt/supplier_info.txt
    proxy_state -- when > 0, requests are routed through the proxy pool

    Returns the list of per-company dicts, each with a 'suppliers' entry.
    """
    head = {
        "time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
        "proxy": proxy_state
    }
    all_company_info = []
    company_info = {}
    # Field names: company name, legal representative, registered capital,
    # founding date, contact phone, company id, detail-page URL.
    company_info_key = ['公司名称','公司法人','注册资本','成立日期','联系电话','公司编号','公司详情']
    error = {'错误提示': '该企业下找不到公司信息...'}
    # NOTE(review): if *htmls* is empty and write_state > 0, *start* below is
    # never assigned and the elapsed-time log at the bottom raises NameError.
    for inde,html in enumerate(htmls):
        company_info_value = ['','','','','','','']
        try:
            start = time.time()
            soup = BeautifulSoup(html,'lxml')
            # Only the first search hit is taken.
            div = soup.select('div.result-list > div.search-item')[0]
            for index,key in enumerate(company_info_key):
                try:
                    if index == 0:
                        company_name = div.select('div.content > div.header')[0].contents[0].getText()
                        company_info_value[index] = company_name
                    elif index == 1:
                        # The label node is checked first so that only the known
                        # field variants are scraped; otherwise the value stays ''.
                        company_faren = div.select('div.content > div.info')[0].contents[0].contents[0].getText()
                        if company_faren=='法定代表人：' or company_faren=='负责人：' or company_faren=='投资人：':
                            company_faren = div.select('div.content > div.info')[0].contents[0].contents[1].getText()
                            company_info_value[index] = company_faren
                    elif index == 2:
                        company_ziben = div.select('div.content > div.info')[0].contents[1].contents[0].getText()
                        if company_ziben=='注册资本：' or company_ziben=='开办资金：' or company_ziben=='出资额：':
                            company_ziben = div.select('div.content > div.info')[0].contents[1].contents[1].getText()
                            company_info_value[index] = company_ziben
                    elif index == 3:
                        company_cdate = div.select('div.content > div.info')[0].contents[2].contents[0].getText()
                        if company_cdate=='成立日期：':
                            company_cdate = div.select('div.content > div.info')[0].contents[2].contents[1].getText()
                            company_info_value[index] = company_cdate
                    elif index == 4:
                        company_phone = div.select('div.content > div.contact')[0].contents[0].contents[0].getText()
                        if company_phone=='电话：':
                            company_phone = div.select('div.content > div.contact')[0].contents[0].contents[1].getText()
                            company_info_value[index] = company_phone
                    elif index == 5:
                        company_id = div.select('.search-result-single')[0].attrs['data-id']
                        company_info_value[index] = company_id
                    elif index == 6:
                        company_href = div.select('div.content > div.header')[0].contents[0].attrs['href']
                        company_info_value[index] = company_href
                except Exception as e:
                    # A missing DOM node only skips this one field.
                    # log.error(e)
                    log.error(traceback.format_exc())
                    continue
            # Zip field names and values into one dict,
            # e.g. {'公司名称': '海尔智家股份有限公司'}.
            company_info = dict(zip(company_info_key,company_info_value))
            # log.info(company_info)
        except Exception as e:
            # Whole-page parse failure: record the error marker and move on.
            # log.error(e)
            log.error(traceback.format_exc())
            log.info(error['错误提示'])
            company_info = error
            all_company_info.append(company_info)
            continue

        # NOTE(review): company_id / company_name come from the parse loop
        # above; if their selectors failed (exception caught, field skipped),
        # they are unbound here and this line raises NameError.
        url = 'https://www.tianyancha.com/company/' + str(company_id)
        all_supplier_info = get_id_supplier_info(get(url,inde,company_name,type,0,proxy_state),inde,type,0,proxy_state)
        
        company_info['suppliers'] = all_supplier_info
        all_company_info.append(company_info)
        
    head['results'] = all_company_info

    # Serialise to pretty-printed JSON, keeping non-ASCII readable.
    all_company_info_json = json.dumps(head, ensure_ascii=False,sort_keys=False, indent=4, separators=(',', ': '))
    # log.info(all_company_info_json)

    if write_state>0:
        name1 = os.getcwd()
        path = ''+name1+'/result/txt/'
        # -18 strips '/supplier_info.txt' to obtain the directory part.
        index = -18
        name = 'supplier_info.txt'
        isexists_dir_Create(path+name,index)
        # Write the JSON dump ('w+' truncates any previous file).
        with open (path+name,'w+',encoding='utf-8') as f:
            f.write(all_company_info_json + '\n')    
            # NOTE(review): redundant — the with-statement already closes f.
            f.close()
            # Record the end time.
        end = time.time()
            # Log the elapsed time.
        log.info('公司与供应商信息写入完毕,用时{:.2f}秒'.format(end-start))
    
    return all_company_info

def get_all_company_info(html,key,page):
    """Parse ALL company hits on one search-result page.

    html -- raw HTML of a search-result page
    key  -- search key (used only in log messages)
    page -- 0-based page number of this result page

    Returns a tuple ``(page_company, first_page_company_name,
    every_page_company_name)`` where page_company maps 'page'/'companys' to
    the parsed records, and the two names are the first hit of page 1 and of
    this page respectively (used by the caller to detect repeated pages).
    """
    company_info = {}
    # Field names: page label, position on page, company name, legal
    # representative, registered capital, founding date, phone, id, URL.
    company_info_key = ['公司页码','公司序号','公司名称','公司法人','注册资本','成立日期','联系电话','公司编号','公司详情']
    page_company = {}
    page_company_info = []
    first_page_company_name = ''
    every_page_company_name = ''
    error = {'错误提示': '该企业下找不到公司信息...'}
    try:
        soup = BeautifulSoup(html,'lxml')
        divs = soup.select('div.result-list > div.search-item')
        page_company['page'] = '第'+str(page+1)+'页'
        log.info('<------------------------ 开始获取【'+key+'】第['+str(page+1)+']页企业信息------------------------>')
        for pageid,div in enumerate(divs):
            company_info_value = ['','','','','','','','','']
            for index,key1 in enumerate(company_info_key):
                try:
                    if index == 0:
                        company_info_page = '第'+str(page+1)+'页'
                        company_info_value[index] = company_info_page
                    # NOTE(review): plain `if`, not `elif` — functionally the
                    # same here since index cannot be both 0 and 1.
                    if index == 1:
                        company_info_pageid = str(pageid+1)
                        company_info_value[index] = company_info_pageid
                    elif index == 2:
                        company_name = div.select('div.content > div.header')[0].contents[0].getText()
                        company_info_value[index] = company_name
                        # Remember the first hit of the first page...
                        if page == 0 and pageid == 0:
                            first_page_company_name = company_name
                        # ...and the first hit of this page (for repeat checks).
                        if pageid == 0:
                            every_page_company_name = company_name
                    elif index == 3:
                        # The label node is checked first so that only the known
                        # field variants are scraped; otherwise the value stays ''.
                        company_faren = div.select('div.content > div.info')[0].contents[0].contents[0].getText()
                        if company_faren=='法定代表人：' or company_faren=='负责人：' or company_faren=='投资人：':
                            company_faren = div.select('div.content > div.info')[0].contents[0].contents[1].getText()
                            company_info_value[index] = company_faren
                    elif index == 4:
                        company_ziben = div.select('div.content > div.info')[0].contents[1].contents[0].getText()
                        if company_ziben=='注册资本：' or company_ziben=='开办资金：' or company_ziben=='出资额：':
                            company_ziben = div.select('div.content > div.info')[0].contents[1].contents[1].getText()
                            company_info_value[index] = company_ziben
                    elif index == 5:
                        company_cdate = div.select('div.content > div.info')[0].contents[2].contents[0].getText()
                        if company_cdate=='成立日期：':
                            company_cdate = div.select('div.content > div.info')[0].contents[2].contents[1].getText()
                            company_info_value[index] = company_cdate
                    elif index == 6:
                        company_phone = div.select('div.content > div.contact')[0].contents[0].contents[0].getText()
                        if company_phone=='电话：':
                            company_phone = div.select('div.content > div.contact')[0].contents[0].contents[1].getText()
                            company_info_value[index] = company_phone
                    elif index == 7:
                        company_id = div.select('.search-result-single')[0].attrs['data-id']
                        company_info_value[index] = company_id
                    elif index == 8:
                        company_href = div.select('div.content > div.header')[0].contents[0].attrs['href']
                        company_info_value[index] = company_href
                except Exception as e:
                    # A missing DOM node only skips this one field.
                    # log.error(e)
                    log.error(traceback.format_exc())
                    continue

                # Zip field names and values into one dict,
                # e.g. {'公司名称': '海尔智家股份有限公司'} (rebuilt every field;
                # only the final iteration's dict matters).
                company_info = dict(zip(company_info_key,company_info_value))
                # log.info(company_info)

            company_info_json = json.dumps(company_info, ensure_ascii=False,sort_keys=False, indent=4, separators=(',', ': '))
            log.info(company_info_json)
                
            page_company_info.append(company_info)
            page_company['companys'] = page_company_info

            # Serialise to pretty-printed JSON (kept for debugging only).
            page_company_json = json.dumps(page_company, ensure_ascii=False,sort_keys=False, indent=4, separators=(',', ': '))
            # log.info(page_company_json)
    except Exception as e:
        # Whole-page parse failure: record the error marker.
        # log.error(e)
        log.error(traceback.format_exc())
        log.info(error['错误提示'])
        company_info = error
        page_company_info.append(company_info)

    return page_company,first_page_company_name,every_page_company_name

def get_page_all_company_info(keys,page_total,state,write_state,proxy_state):
    """Crawl the first ``page_total`` search-result pages for each keyword.

    Args:
        keys: iterable of search keywords (company names).
        page_total: pages to fetch per keyword; str or int, coerced with
            int() when building the page URLs.
        state: request-logging flag forwarded to get() (0 = quiet, 1 = verbose).
        write_state: when > 0, the aggregated JSON is written to
            result/txt/page_company_info.txt under the current directory.
        proxy_state: proxy flag forwarded to get() (0 = off, 1 = on).

    Returns:
        str: all results serialized as a pretty-printed JSON document.
    """
    start = time.time()
    all_company_info = []
    for key_index, key in enumerate(keys):
        urls = ['https://www.tianyancha.com/search/p{0}?key={1}'.format(i, str(key))
                for i in range(1, int(page_total) + 1)]
        page_companys = []
        first_page_company_name = ''
        for page, url in enumerate(urls):
            html = get(url, key_index, key, '企业', state, proxy_state)
            page_company, first_name, page_first_name = get_all_company_info(html, key, page)
            # Remember the first hit of page 1 as a sentinel.
            if page == 0:
                first_page_company_name = first_name
            # The site serves page 1 again past the last real page; stop as
            # soon as a later page starts with the same company as page 1.
            if page > 0 and page_first_name == first_page_company_name:
                log.error('第'+str(page+1)+'页第一个['+page_first_name+']和第1页第一个['+first_page_company_name+']结果相同')
                break
            page_companys.append(page_company)

        all_company_info.append({
            'name': key,
            'page': page_total,
            'content': page_companys,
        })

    head = {
        "time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
        "proxy": proxy_state,
    }
    head['results'] = all_company_info

    # Pretty-print; ensure_ascii=False keeps the Chinese text readable.
    all_company_info_json = json.dumps(head, ensure_ascii=False, sort_keys=False,
                                       indent=4, separators=(',', ': '))

    if write_state > 0:
        path = os.getcwd() + '/result/txt/'
        name = 'page_company_info.txt'
        # -22 == -len('/' + name): presumably lets isexists_dir_Create strip
        # the file name and create only the directory part — TODO confirm.
        isexists_dir_Create(path + name, -22)
        # 'with' closes the handle automatically; the old explicit close()
        # inside the with-block was redundant.
        with open(path + name, 'w+', encoding='utf-8') as f:
            f.write(all_company_info_json + '\n')
        end = time.time()
        log.info('所有公司信息写入完毕,用时{:.2f}秒'.format(end - start))
    return all_company_info_json

# ---------------------------------------------------------------------------------------
# Interactive mode: look up a single company's details by a search keyword.
def get_sou_company_info():
    # Example keyword: '海尔优家智能科技（北京）有限公司'
    key = input('请输入公司名称：')
    search_url = 'https://www.tianyancha.com/search?key=' + str(key)
    state = 1        # request logging: 0 = off, 1 = on
    write_state = 1  # result file:     0 = skip, 1 = write
    proxy_state = 0  # proxy:           0 = off, 1 = on
    html = get(search_url, 0, key, '企业', state, proxy_state)
    get_company_info(html, key, write_state, proxy_state)

# Batch mode: read company names from an excel sheet and fetch each one.
def get_excel_company_info():
    search_url = 'https://www.tianyancha.com/search?key='
    workbook = 'tianyanc_company.xlsx'
    sheet_index = 0
    state = 1        # request logging: 0 = off, 1 = on
    write_state = 1  # result file:     0 = skip, 1 = write
    proxy_state = 0  # proxy:           0 = off, 1 = on
    get_odd_company_info(search_url, workbook, sheet_index, '企业',
                         state, write_state, proxy_state)

# Interactive mode: fetch one company's supplier list by its numeric company id.
def get_id_sou_supplier_info():
    # Example id: '150200971'
    key = input('请输入公司编号：')
    company_url = 'https://www.tianyancha.com/company/' + str(key)
    kind = '供应商'
    state = 0        # request logging: 0 = off, 1 = on
    write_state = 1  # result file:     0 = skip, 1 = write
    proxy_state = 0  # proxy:           0 = off, 1 = on
    html = get(company_url, 0, key, kind, state, proxy_state)
    get_id_supplier_info(html, 0, kind, write_state, proxy_state)

# Interactive mode: fetch one company's supplier list by company name.
def get_sou_company_supplier_info():
    # Example name: '海尔优家智能科技（北京）有限公司'
    key = input('请输入公司名称：')
    search_url = 'https://www.tianyancha.com/search?key=' + str(key)
    kind = '供应商'
    state = 0        # request logging: 0 = off, 1 = on
    write_state = 1  # result file:     0 = skip, 1 = write
    proxy_state = 0  # proxy:           0 = off, 1 = on
    htmls = [get(search_url, 0, key, '企业', state, proxy_state)]
    get_company_supplier_info(htmls, kind, write_state, proxy_state)

# Batch mode: read company names from excel and fetch each one's supplier list.
def get_excel_company_supplier_info():
    workbook = 'tianyanc_company.xlsx'
    sheet_index = 1
    keys = read_excel_data(workbook, sheet_index)
    search_url = 'https://www.tianyancha.com/search?key='
    kind = '供应商'
    state = 1        # request logging: 0 = off, 1 = on
    write_state = 1  # result file:     0 = skip, 1 = write
    proxy_state = 0  # proxy:           0 = off, 1 = on
    htmls = [get(search_url + str(key), idx, key, '企业', state, proxy_state)
             for idx, key in enumerate(keys)]
    get_company_supplier_info(htmls, kind, write_state, proxy_state)

# Interactive mode: crawl the first N search-result pages for one keyword.
# The page count is read from the console (e.g. 2 crawls the first 2 pages).
def get_sou_all_company_info():
    # Example: key='海尔智家股份有限公司', page_total=4
    key = input('请输入搜索关键词：')
    page_total = input('请输入爬取的页数：')
    keys = [key]
    state = 1        # request logging: 0 = off, 1 = on
    write_state = 1  # result file:     0 = skip, 1 = write
    proxy_state = 0  # proxy:           0 = off, 1 = on
    get_page_all_company_info(keys, page_total, state, write_state, proxy_state)
    
# Batch mode: read keywords from excel and crawl the first N result pages each.
# The page count is read from the console (e.g. 2 crawls the first 2 pages).
def get_excel_all_company_info():
    workbook = 'tianyanc_company.xlsx'
    sheet_index = 0
    keys = read_excel_data(workbook, sheet_index)
    page_total = input('请输入爬取的页数：')
    state = 1        # request logging: 0 = off, 1 = on
    write_state = 1  # result file:     0 = skip, 1 = write
    proxy_state = 0  # proxy:           0 = off, 1 = on
    get_page_all_company_info(keys, page_total, state, write_state, proxy_state)

# ---------------------------------------------------------------------------------------
def main():
    """Entry point: exactly one crawl mode is active; the rest stay commented."""
    # Single company by search keyword (interactive).
    get_sou_company_info()

    # Multiple companies by name, read from excel.
    # get_excel_company_info()

    # Suppliers of one company by company id (interactive).
    # get_id_sou_supplier_info()

    # Suppliers of one company by company name (interactive).
    # get_sou_company_supplier_info()

    # Suppliers of multiple companies, names read from excel.
    # get_excel_company_supplier_info()

    # First N search-result pages for one keyword (interactive).
    # get_sou_all_company_info()

    # First N search-result pages for keywords read from excel.
    # get_excel_all_company_info()

if __name__ == '__main__':
    main()