# -*- coding: UTF-8 -*-
# Project : aiqicha
# File : index.py
# IDE : PyCharm
# Author : 博科（鑫贝西）田聪
# Date : 2021/10/26 15:11
import json
import os
import random
import re
import time
from datetime import datetime
from urllib.parse import quote

import pymysql
import requests
from retry import retry

from huobiao.huobiao.tools.region import reg
from tools.settings import *


class AiQiCha:
    """Fill in missing phone numbers for companies in the `fa_huobiao`
    MySQL table by scraping aiqicha.baidu.com.

    Instantiating the class connects to MySQL, loads the target rows for
    one hard-coded search key, and immediately starts crawling
    (``start_request`` is called from ``__init__``).
    """

    # Extracts the JSON blob embedded in the page as `window.pageData = {...};`.
    # Raw string, compiled once instead of re-parsed on every page.
    _PAGE_DATA_RE = re.compile(r'window\.pageData = ({.+}?);')

    def __init__(self):
        self.cookie = self._read_cookie()
        self.config = {
            "host": HOST,
            "user": USER,
            "password": PASSWORD,
            "database": 'daohang',
            'cursorclass': pymysql.cursors.DictCursor,
        }
        # Timestamp of the last verified-alive DB connection (see to_mysql).
        self.new_timestamp = datetime.now()
        # Load every company row for this search key; rows whose `phone`
        # is empty are the ones we will crawl.
        self.db = pymysql.connect(**self.config)
        self.cursor = self.db.cursor()
        search_sql_version = "SELECT * FROM `fa_huobiao` where search_key='灭菌器'"
        # search_sql_version = "SELECT enname,phone FROM  `fa_huobiao` WHERE search_key='核酸提取仪'"
        self.cursor.execute(search_sql_version)
        # Shape: [{'enname': '山东康瑞尔医疗科技有限公司', 'phone': ''}, ...]
        self.en_name_list = self.cursor.fetchall()

        self.start_request()

    @staticmethod
    def _read_cookie():
        """Return the BAIDUID cookie text stored in tools/cookies.txt.

        Context manager closes the handle deterministically (the original
        left the file open until garbage collection).
        """
        cookie_path = os.path.dirname(__file__) + os.sep + 'tools' + os.sep + 'cookies.txt'
        with open(cookie_path, 'r', encoding='utf-8') as fp:
            return fp.read()

    def __del__(self):
        # Best-effort cleanup: attributes may be missing if __init__ failed
        # part-way, and the connection may already be closed — never let
        # __del__ raise.
        try:
            self.cursor.close()
            self.db.close()
        except Exception:
            pass

    def start_request(self):
        """Walk the loaded rows and crawl every company with no phone yet."""
        for en_name_dict in self.en_name_list:
            en_name = en_name_dict.get('enname')  # company name
            phone = en_name_dict.get('phone')  # existing contact number, if any
            if phone == '' or phone is None:
                # Throttle between companies to stay under the radar.
                time.sleep(5)
                en_search_link = 'https://aiqicha.baidu.com/s?q={}&t=0'.format(quote(en_name))

                try:
                    self.parse(en_search_link=en_search_link)
                except Exception as error:
                    # One company failing must not stop the whole run.
                    print('#' * 100)
                    print(error)
                    print(en_name_dict)
                    print('#' * 100)

    def parse(self, en_search_link):
        """Fetch the search listing (first page only) and follow the
        exact-name match into its detail page."""
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Cookie': 'BAIDUID_BFESS={};'.format(self.cookie),
            'Host': 'aiqicha.baidu.com',
            'Pragma': 'no-cache',
            'sec-ch-ua': '"Not A;Brand";v="99", "Chromium";v="96", "Microsoft Edge";v="96"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            # Randomized UA fragments make successive requests look less uniform.
            'User-Agent': f"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/{random.randint(565, 575)}.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36 Edg/{random.randint(90, 99)}.0.1054.29",
        }

        response = self.web_spider(
            url=en_search_link,
            headers=headers
        )
        matched = self._PAGE_DATA_RE.search(response.text)
        if matched is None:
            # Blocked or layout change: raise a clear error instead of the
            # opaque AttributeError `.group(1)` would produce on None.
            raise ValueError('window.pageData not found in search page: {}'.format(en_search_link))
        respose_json = json.loads(matched.group(1))

        result_data = respose_json.get('result')  # type:dict

        result_list = result_data.get('resultList')  # search hits
        query_str = result_data.get('queryStr')  # the queried company name
        for result in result_list:

            item = {}
            # Strip the <em> highlight tags the search page wraps matches in.
            entName = re.sub('<.*?>', '', result.get('entName'))  # company name
            legalPerson = result.get('legalPerson')  # legal representative

            item['entName'] = entName
            item['legalPerson'] = re.sub('<.*?>', '', legalPerson)
            pid = result.get('pid')  # id used to build the detail-page URL
            ent_info_link = 'https://aiqicha.baidu.com/company_detail_{}'.format(pid)
            # Only the hit whose name matches the query exactly is processed.
            if entName == query_str:
                self.parse_content(ent_info_link, item)
                break

    def to_mysql(self, item):
        """Persist one scraped item into fa_huobiao, reviving the MySQL
        connection first if it has gone stale."""
        try:
            now_timestamp = datetime.now()
            delta_T = (now_timestamp - self.new_timestamp).seconds / 60  # minutes idle
            if delta_T > 2:
                print("判断数据库链接是否存活")
                # reconnect=True lets pymysql transparently re-establish a
                # dropped connection instead of raising.
                self.db.ping(reconnect=True)
                self.new_timestamp = now_timestamp
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate.
            print("当前数据库链接已关闭，正在重新链接.......")

            self.db = pymysql.connect(**self.config)
            self.cursor = self.db.cursor()
            print("=-=-=-=-= 数据库已连接 -=-=-=-=-=")
        finally:
            try:
                if item['phone'] == '':
                    item['phone'] = '暂无电话'
                # Parameterized query: scraped values may contain quotes,
                # which would break — or inject into — string-built SQL.
                sql = ("UPDATE fa_huobiao SET phone=%s,enhref=%s,legalperson=%s,"
                       "area=%s,addr=%s WHERE enname = %s")
                params = (item['phone'], item['enhref'], item['legalPerson'],
                          item['area'], item['addr'], item['entName'])
                print(sql, params)
                self.cursor.execute(sql, params)
                self.db.commit()
                time.sleep(0.2)

            except Exception as e:
                print(e)

    def parse_content(self, ent_info_link, item):
        """Scrape phone number(s) and address from a company detail page,
        then persist the completed `item` to MySQL."""
        headers = {
            'Host': 'aiqicha.baidu.com',
            'Cache-Control': 'no-cache',
            'Pragma': 'no-cache',
            'Upgrade-Insecure-Requests': '1',
            'Connection': 'keep-alive',
            'sec-ch-ua': '"Microsoft Edge";v="95", "Chromium";v="95", ";Not A Brand";v="99"',
            'User-Agent': f"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/{random.randint(565, 575)}.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36 Edg/{random.randint(90, 99)}.0.1054.29",
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'X-Requested-With': 'XMLHttpRequest',
            'Referer': 'https://aiqicha.baidu.com/cbae/tr?headto={}'.format(quote(ent_info_link)),
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cookie': 'BAIDUID_BFESS={};'.format(self.cookie),
        }
        response = self.web_spider(
            url=ent_info_link,
            headers=headers

        )
        try:
            matched = self._PAGE_DATA_RE.search(response.text)
            if matched is None:
                raise ValueError('window.pageData not found in detail page')
            page_data_json = json.loads(matched.group(1))
            result = page_data_json.get('result')
            addr = result.get('addr')  # registered address
            item['area'] = reg(addr)  # map address text to a region via tools.region
            item['enhref'] = ent_info_link
            item['addr'] = addr
            phoneinfo = result.get('phoneinfo')  # type:list
            # Join every listed number with a full-width comma.
            phones = []
            for phone in phoneinfo:
                phones.append(phone.get('phone'))
            item['phone'] = '，'.join(phones)
            self.to_mysql(item)
        except (IndexError, AttributeError, TypeError, ValueError) as e:
            # The original caught only IndexError, but a missing payload
            # raises AttributeError/TypeError and bad JSON raises ValueError
            # (json.JSONDecodeError), so those failures skipped this
            # diagnostic block entirely.
            print('#' * 100)
            print(e)
            print(response.text)
            print(ent_info_link)
            print('#' * 100)

    @retry(tries=10, delay=1, backoff=2, max_delay=10)
    def web_spider(self, url, headers: dict):
        """GET `url` with exponential-backoff retries; raises to trigger a
        retry when the response status is neither 200 nor 302.

        NOTE: the cookie re-read here only affects headers built *after*
        this call — the `headers` dict passed in was rendered with the
        previous cookie value.
        """
        self.cookie = self._read_cookie()
        response = requests.get(
            url,
            headers=headers
        )
        response.encoding = response.apparent_encoding
        if response.status_code not in [302, 200]:
            raise Exception('没有获取到数据')
        # Polite per-request crawl delay.
        time.sleep(1)
        return response


if __name__ == '__main__':
    # Run the scraper forever; each AiQiCha() pass re-reads the DB and
    # retries rows that still have no phone number.
    while True:
        try:
            AiQiCha()
        except Exception as e:
            # Top-level boundary: log and keep looping.
            print(e)
        # Message now matches the actual sleep duration (the original
        # printed "200s" while sleeping only 20 seconds).
        print("############    休眠20s    ##############")
        time.sleep(20)
