import ast
import asyncio
import json
import multiprocessing
import os
import random
import re
import string
import sys
import time
import traceback
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import Lock

# Third-party packages live in this virtualenv; the path must be appended
# before any of the imports below.
sys.path.append('/root/qvenv')
import pymysql
import redis
import requests
import urllib3
from lxml import etree
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# from urllib3.exceptions import InsecureRequestWarning
from requests.packages.urllib3.exceptions import InsecureRequestWarning

from configs.cfg import *
# from common.proxy_ip import f_list
from tools.User_Agent import UserAgent
from tools.insert_update import insert_sql
from tools.logout import save_log


class FrenchSpider:
    """Scraper for company registry data on infogreffe.com.

    Pipeline, driven by :meth:`run` once per uppercase search letter A-Z:

      1. ``getIDs``         - seed redis id sets per registration category
      2. ``handle_request`` - refresh session cookies with headless Chrome
                              and fan out summary requests over a process pool
      3. ``redis2mysql``    - flush collected metadata rows into MySQL
      4. ``handle_task``    - fetch each company's detail page

    NOTE(review): the redis/MySQL connections, Chrome options and the lock
    are created once at class-definition time and shared by every instance
    (and by worker processes, which receive them pickled via ``self``).
    """
    # Silence warnings for HTTPS requests made without certificate checks.
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    # One search keyword per uppercase letter A-Z.
    string_temp = string.ascii_uppercase
    db = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_pwd, decode_responses=True, db=12,
                           health_check_interval=30, retry_on_timeout=True)
    pipeline = db.pipeline()
    conn = pymysql.connect(host=mysql_host, user=mysql_name, port=mysql_port, password=mysql_pwd,
                           database='source', charset='utf8', connect_timeout=30)
    cur = conn.cursor()
    # Process lock (kept for compatibility; not used by the methods below).
    lock = Lock()
    # Headless Chrome configuration used for the cookie-refresh step.
    options = webdriver.ChromeOptions()
    options.add_argument('--disable-extensions')
    options.add_argument('--start-maximized')
    options.add_argument('--headless')                          # no visible window
    options.add_argument('blink-settings=imagesEnabled=false')  # skip image loading
    options.add_experimental_option('useAutomationExtension', False)
    # FIX: the original passed a PhantomJS-style "service_args=['–ignore-ssl-errors=true', ...]"
    # string (with en-dashes) as one bogus Chrome switch; use the real flag.
    options.add_argument('--ignore-certificate-errors')
    # Hide the "Chrome is being controlled by automated software" banner.
    options.add_experimental_option('excludeSwitches', ['enable-automation'])
    options.add_argument('--disable-gpu')
    options.add_argument('window-size=1920x3000')               # fixed viewport size
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-dev-shm-usage')

    # Keyword search endpoint: returns company ids grouped by registration status.
    id_url = 'https://www.infogreffe.com/services/entreprise/rest/recherche/parPhrase?phrase={}&typeProduitMisEnAvant=EXTRAIT&domaine=FR'
    # Seed page used only to obtain fresh session cookies.
    base_url = 'https://www.infogreffe.com/entreprise-societe/428257042-bureau-van-dijk-information-management-750199B187990000.html?typeProduitOnglet=EXTRAIT&afficherretour=true&tab=entrep'
    # Registered (RCS) companies: summary endpoint + detail-page template.
    activ_url = 'https://www.infogreffe.com/services/entreprise/rest/recherche/resumeEntreprise?typeRecherche=ENTREP_RCS_ACTIF'
    activ_del_detail_url = 'https://www.infogreffe.com/entreprise-societe/{}-{}-{}0000.html?typeProduitOnglet=EXTRAIT&afficherretour=true'
    # Companies outside the RCS register ("hors RCS").
    hors_url = 'https://www.infogreffe.com/services/entreprise/rest/recherche/resumeEntreprise?typeRecherche=ENTREP_HORS_RCS'
    hors_detail_url = 'https://www.infogreffe.com/entreprise-societe/{}-{}-{}.html?afficherretour=true'
    # Deregistered (radiated) companies.
    del_url = 'https://www.infogreffe.com/services/entreprise/rest/recherche/resumeEntreprise?typeRecherche=ENTREP_RCS_RADIES'
    # Default headers for plain (non-cookie) requests.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
    }

    def _seed_ids(self, items, redis_key):
        """Pipeline every ``item['id']`` from *items* into the redis set
        *redis_key*, executing in chunks of 10k to bound pipeline memory."""
        for start in range(0, len(items), 10000):
            for item in items[start:start + 10000]:
                self.pipeline.sadd(redis_key, item['id'])
            self.pipeline.execute()

    def getIDs(self, keyword):
        """Search *keyword* and seed the redis id sets for each category.

        Relevant response keys: ``entrepRCSStoreResponse`` (registered),
        ``entrepRadieeStoreResponse`` (deregistered),
        ``entrepHorsRCSStoreResponse`` (outside the RCS register).
        """
        print(f'put data to redis queue with {keyword}...')
        resp = requests.get(self.id_url.format(keyword), headers=self.headers).content.decode()
        print('Go')
        tmp_dic = json.loads(resp)
        self._seed_ids(tmp_dic['entrepRCSStoreResponse']['items'], 'rcsIDSet')       # registered companies
        self._seed_ids(tmp_dic['entrepRadieeStoreResponse']['items'], 'delIDSet')    # deregistered companies
        self._seed_ids(tmp_dic['entrepHorsRCSStoreResponse']['items'], 'horsIDSet')  # companies outside RCS
        print('id inserted')

    def handle_request(self, wd):
        """Drain the three id sets for search word *wd*.

        Repeatedly submits batched summary requests to a process pool until
        all three id sets are empty, refreshing the session cookie (via a
        headless Chrome search for *wd*) every 30 batches.
        """
        db = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_pwd, decode_responses=True, db=12,
                               health_check_interval=30, retry_on_timeout=True)
        # FIX: the original left `headers` undefined (UnboundLocalError) when
        # 'queue:count' survived a previous run at a length not divisible by 30.
        headers = None
        with ProcessPoolExecutor(max_workers=multiprocessing.cpu_count()) as pool:
            while True:
                if headers is None or db.llen('queue:count') % 30 == 0:
                    print('update cookie...')
                    driver = webdriver.Chrome(options=self.options)
                    try:
                        driver.implicitly_wait(20)
                        driver.get(self.base_url)
                        time.sleep(3)
                        driver.find_element(By.ID, 'acceptcookies').click()
                        search_box = driver.find_element(By.ID, 'champRechercheGlobale')
                        search_box.click()
                        search_box.send_keys(wd)
                        search_box.send_keys(Keys.ENTER)
                        # Give the search results time to load so every
                        # session cookie has been set.
                        time.sleep(20)
                        # Keep all cookies except Google-Analytics '__ut*' ones.
                        cookie_str = ''.join(
                            f'{cookie["name"]}={cookie["value"]};'
                            for cookie in driver.get_cookies()
                            if not cookie['name'].startswith('__ut'))
                    finally:
                        # FIX: the original never quit the driver and leaked a
                        # Chrome process on every cookie refresh.
                        driver.quit()
                    headers = {
                        'Content-Type': 'text/plain',
                        'cookie': cookie_str,
                        'User-Agent': random.choice(UserAgent)
                    }
                pool.submit(self.get_rcs_data, headers)
                pool.submit(self.get_hors_data, headers)
                pool.submit(self.get_del_data, headers)
                time.sleep(3)
                # Count completed batches; drives the cookie-refresh cadence.
                db.lpush('queue:count', '1')
                if db.scard('delIDSet') <= 0 and db.scard('rcsIDSet') <= 0 and db.scard('horsIDSet') <= 0:
                    print('redis数据已清空')
                    # Mark this search word done so it is never re-requested.
                    db.sadd('succeeded_wd', wd)
                    break

    def handle_response(self, resp, detail_url):
        """Parse a summary-search response and stash one metadata tuple per
        company into the redis sets 'french_meta' (→ MySQL) and 'meta'
        (→ detail-page queue).

        :param resp: requests.Response from one of the resumeEntreprise POSTs
        :param detail_url: template filled with (siren, name-slug, dossier no.)
        """
        items = json.loads(resp.content.decode())['items']
        if not items:
            print(f'{resp}数据为空')
        # Strip French accents once so the name slug matches the site's URLs.
        accent_map = str.maketrans("éàèùâêîôûç", "eaeuaeiouc")
        for item in items:
            cid = item['id']                                   # company id
            numeroDossier = item['numeroDossier']              # dossier number
            name = item['libelleEntreprise']['denomination']   # company name
            siren = str(item['siren']) if item['siren'] else ''  # SIREN number
            nic = item['nic']
            siret = siren + nic if nic else ''                 # SIRET = SIREN + NIC
            lignes = item['adresse']['lignes']                 # address lines
            postcode = item['adresse']['codePostal']
            city = item['adresse']['bureauDistributeur']
            register_address = ' '.join(lignes) + ' ' + city
            sourceDonnees = item['sourceDonnees']              # data source
            # NOTE(review): dateRadiation is the deregistration date (the
            # original comment mistranslated it as "strike time").
            struck_off_date = item['dateRadiation'] if item['dateRadiation'] else ''
            activity = {
                # French industry classification (NAF) code and label.
                'NAF': item['activite']['codeNAF'] if item['activite']['codeNAF'] else '',
                'NAFName': item['activite']['libelleNAF'] if item['activite']['libelleNAF'] else '',
            }
            status = item['etatSurveillance']                  # company status
            # Build the URL slug: lowercase, de-accented, non-word chars → '-'.
            slug = re.sub(r'\W', '-', name.translate(accent_map).lower())
            detail = detail_url.format(siren, slug, numeroDossier)
            meta = (cid, name, detail, siren, siret, postcode, register_address, sourceDonnees, struck_off_date,
                    str(activity), status)
            self.pipeline.sadd('french_meta', str(meta))
            self.pipeline.sadd('meta', str(meta))
        self.pipeline.execute()

    def redis2mysql(self):
        """Drain 'french_meta' from redis into MySQL in batches of ≤1000 rows."""
        db = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_pwd, decode_responses=True, db=12,
                               health_check_interval=30, retry_on_timeout=True)
        columns = ('cid', 'name', 'detail_url', 'siren', 'siret', 'postcode', 'register_address', 'datasource',
                   'struck_off_date', 'activity', 'status')
        try:
            while True:
                pending = db.scard('french_meta')
                if pending <= 0:
                    break
                tmp_data = db.spop('french_meta', min(pending, 1000))
                # FIX: ast.literal_eval instead of eval() — the stored strings
                # are plain tuple literals and eval would execute arbitrary code.
                datas = [ast.literal_eval(data) for data in tmp_data]
                self.conn.ping()  # revive the MySQL connection if it timed out
                sql = insert_sql('french_company_info', columns, datas)
                self.cur.execute(sql)
                self.conn.commit()
                print('to mysql success')
        except Exception as e:
            traceback.print_exc()
            save_log('redis2mysql failed', '../../logs/french.log')
            save_log(e, '../../logs/french.log')

    def handle_task(self):
        """Fan out detail-page fetches for every tuple queued in 'meta',
        in batches of ≤100 per process-pool round."""
        db = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_pwd, decode_responses=True, db=12,
                               health_check_interval=30, retry_on_timeout=True)
        while True:
            headers = {
                'Content-Type': 'text/plain',
                'User-Agent': random.choice(UserAgent)
            }
            pending = db.scard('meta')
            print(pending)
            if pending <= 0:
                break
            # The pool is scoped per batch so exiting the `with` waits for
            # the whole round before popping the next one.
            with ProcessPoolExecutor(max_workers=multiprocessing.cpu_count()) as p:
                tmp_datas = db.spop('meta', min(pending, 100))
                # FIX: ast.literal_eval instead of eval() on redis payloads.
                for meta in (ast.literal_eval(data) for data in tmp_datas):
                    # meta[0] = company id, meta[2] = detail-page URL.
                    p.submit(self.handle_detail, meta[0], meta[2], headers)
                time.sleep(5)

    def handle_detail(self, cid, detail_url, headers):
        """Scrape registration date and legal form from one company detail
        page and upsert them into MySQL keyed by *cid*.

        The URL shape selects the parser: 'EXTRAIT' pages are RCS companies,
        everything else uses the "hors RCS" layout.
        """
        try:
            # FIX: the request was outside the try, so network errors inside
            # pool-submitted tasks vanished into unchecked futures; now logged.
            resp = requests.get(detail_url, headers=headers, timeout=30).content.decode()
            emt = etree.HTML(resp)
            if 'EXTRAIT' in detail_url.upper():
                # RCS (registered/deregistered) layout: dd/mm/yyyy date field.
                text = emt.xpath('//div[@datapath="dateImmatriculationRegistre"]/p/text()')
                register_time = ''
                if text:
                    match = re.search(r'\d{1,2}/\d{1,2}/\d{4}', text[0])
                    if match:
                        register_time = ''.join(match.group().split())
                form_nodes = emt.xpath(
                    '//div[@datapath="entreprise.personneMorale.identification.formeJuridique.libelle"]/p//text()')
                legal_form = form_nodes[0].strip() if form_nodes else ''
            else:
                # "hors RCS" layout: fixed table positions.
                text = emt.xpath(
                    '//div[@id="showHideContent"]/div[@class="showHideSwitched"]/div[contains(@class,"horsRcs")]/table/tr/td[@class="last"]/div/p[1]/span[2]/text()')
                register_time = ''.join(text[0].split()) if text else ''
                tmp_legal_form = emt.xpath(
                    '//div[@id="showHideContent"]/div[@class="showHideSwitched"]/div[contains(@class,"horsRcs")]/table/tr/td[@class="first"]/div/p[5]/*//text()')
                legal_form = tmp_legal_form[1].strip() if tmp_legal_form else ''
            meta = (cid, register_time, legal_form)
            self.conn.ping()
            sql = insert_sql('french_company_info', (
                'cid', 'register_time', 'legal_form'), meta)
            self.cur.execute(sql)
            self.conn.commit()
        except Exception as e:
            traceback.print_exc()
            save_log(e, '../../logs/french.log')

    def get_rcs_data(self, headers):
        """POST a batch of ≤100 registered-company ids and store the results."""
        headers['User-Agent'] = random.choice(UserAgent)
        # NOTE(review): the body is the str() of a Python set of ids with
        # spaces stripped (e.g. {'1','2'}) — apparently what the API expects.
        resp = requests.post(self.activ_url, headers=headers, timeout=30,
                             data=str(self.db.spop('rcsIDSet', 100)).replace(' ', ''))
        self.handle_response(resp, self.activ_del_detail_url)

    def get_hors_data(self, headers):
        """POST a batch of ≤100 "hors RCS" company ids and store the results."""
        headers['User-Agent'] = random.choice(UserAgent)
        resp = requests.post(self.hors_url, headers=headers, timeout=30,
                             data=str(self.db.spop('horsIDSet', 100)).replace(' ', ''))
        self.handle_response(resp, self.hors_detail_url)

    def get_del_data(self, headers):
        """POST a batch of ≤100 deregistered-company ids and store the results."""
        headers['User-Agent'] = random.choice(UserAgent)
        resp = requests.post(self.del_url, headers=headers, timeout=30,
                             data=str(self.db.spop('delIDSet', 100)).replace(' ', ''))
        self.handle_response(resp, self.activ_del_detail_url)

    def run(self):
        """Process every search letter that is not yet in 'succeeded_wd'.

        The commented calls below are the earlier pipeline stages; they are
        toggled manually depending on which stage needs (re-)running.
        """
        for wd in self.string_temp:
            # New connection per keyword, matching the per-stage connections
            # used elsewhere in this class.
            db = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_pwd, decode_responses=True, db=12,
                                   health_check_interval=30, retry_on_timeout=True)
            if not db.sismember('succeeded_wd', wd):
                # db.delete('queue:count')
                # db.delete('rcsIDSet')
                # db.delete('horsIDSet')
                # db.delete('delIDSet')
                # self.getIDs(wd)
                # self.handle_request(wd)
                # self.redis2mysql()
                # Fetch detail-page data for everything queued in 'meta'.
                self.handle_task()
            else:
                print(f'{wd} already requested！')


if __name__ == '__main__':
    # Entry point: crawl every A-Z search keyword end to end.
    FrenchSpider().run()
