import asyncio
import json, re, time, traceback
import os, sys

sys.path.append('/root/qvenv')
import redis, pymysql
import multiprocessing
import string
import requests, urllib3
from lxml import etree
from tools.insert_update import insert_sql
from tools.logout import save_log
from configs.cfg import *
from selenium import webdriver
from multiprocessing import Pool
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
# from urllib3.exceptions import InsecureRequestWarning
from requests.packages.urllib3.exceptions import InsecureRequestWarning


class FrenchSpider:
    # urllib3.disable_warnings(InsecureRequestWarning)
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    string_temp = string.ascii_uppercase
    db = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_pwd, decode_responses=True, db=12,
                           health_check_interval=30)
    pipeline = db.pipeline()
    conn = pymysql.connect(host=mysql_host, user=mysql_name, port=mysql_port, password=mysql_pwd,
                           database='source', charset='utf8')

    def __init__(self):
        # 用来获取相应搜索内容反应结果的所有公司id
        self.id_url = 'https://www.infogreffe.com/services/entreprise/rest/recherche/parPhrase?phrase={}&typeProduitMisEnAvant=EXTRAIT&domaine=FR'
        # 源url
        self.base_url = 'https://www.infogreffe.com/entreprise-societe/428257042-bureau-van-dijk-information-management-750199B187990000.html?typeProduitOnglet=EXTRAIT&afficherretour=true&tab=entrep'
        # 已注册的url
        self.activ_url = 'https://www.infogreffe.com/services/entreprise/rest/recherche/resumeEntreprise?typeRecherche=ENTREP_RCS_ACTIF'
        self.activ_del_detail_url = 'https://www.infogreffe.com/entreprise-societe/{}-{}-{}0000.html?typeProduitOnglet=EXTRAIT&afficherretour=true'
        # 未注册的url
        self.hors_url = 'https://www.infogreffe.com/services/entreprise/rest/recherche/resumeEntreprise?typeRecherche=ENTREP_HORS_RCS'
        self.hors_detail_url = 'https://www.infogreffe.com/entreprise-societe/{}-{}-{}.html?afficherretour=true'
        # 已注销的url
        self.del_url = 'https://www.infogreffe.com/services/entreprise/rest/recherche/resumeEntreprise?typeRecherche=ENTREP_RCS_RADIES'
        # self.url = 'https://www.infogreffe.com/services/entreprise/rest/recherche/derniereRechercheEntreprise'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
        }

    def getIDs(self, keyword):
        '''
        ['critereRecherchePrincipal', 'critereRecherche', 'entrepRechInfosComplementaires', 'entrepRCSStoreResponse', 'entrepMultiStoreResponse', 'entrepRadieeStoreResponse', 'entrepHorsRCSStoreResponse', 'reprStoreResponse', 'typeProduitMisEnAvant']
        :return:
        '''
        print(f'put data to redis queue with {keyword}...')
        resp = requests.get(self.id_url.format(keyword), headers=self.headers, verify=False).content.decode()
        print('Go')
        tmp_dic = json.loads(resp)
        # 获取已注册公司数据
        rcs_items = tmp_dic['entrepRCSStoreResponse']['items']
        if rcs_items:
            [self.pipeline.sadd('rcsIDSet', item['id']) for item in rcs_items]
        self.pipeline.execute()
        print('rcs put into...')
        # 获取已注销公司数据
        del_items = tmp_dic['entrepRadieeStoreResponse']['items']
        if del_items:
            [self.pipeline.sadd('delIDSet', item['id']) for item in del_items]
        self.pipeline.execute()
        print('del put into...')
        # # 获取未注册公司数据
        hors_items = tmp_dic['entrepHorsRCSStoreResponse']['items']
        if hors_items:
            [self.pipeline.sadd('horsIDSet', item['id']) for item in hors_items]
        self.pipeline.execute()
        print('hors put into...')

    def handle_request(self, wd):
        # option = webdriver.ChromeOptions()
        # option.headless = True
        # driver = webdriver.Chrome(options=option)
        options = webdriver.ChromeOptions()
        options.add_argument('--disable-extensions')  # 启动无痕界面
        options.add_argument('--start-maximized')  # 设置浏览器窗口最大化
        options.add_argument('--headless')  # 浏览器不提供可视化页面
        # 关闭chrome正受到测试脚本控制
        options.add_experimental_option('detach', True)
        options.add_experimental_option('useAutomationExtension', False)
        options.add_experimental_option('excludeSwitches', ['enable-automation'])
        options.add_argument('--disable-gpu')  # 禁用GPU加速
        # options.add_argument('window-size=1920x3000')  # 指定浏览器分辨率
        options.add_argument('--no-sandbox')  # 添加沙盒模式
        options.add_argument('--disable-dev-shm-usage')
        driver = webdriver.Chrome(options=options)
        try:
            driver.implicitly_wait(20)
            driver.get(self.base_url)
            driver.find_element(By.ID, 'acceptcookies').click()
            driver.find_element(By.ID, 'champRechercheGlobale').click()
            driver.find_element(By.ID, 'champRechercheGlobale').send_keys(wd)
            driver.find_element(By.ID, 'champRechercheGlobale').send_keys(Keys.ENTER)
            time.sleep(20)
            cookies = driver.get_cookies()
            c = ''
            for cookie in cookies:
                if not cookie['name'].startswith('__ut'):
                    c += f'{cookie["name"]}={cookie["value"]};'
            headers = {
                'Content-Type': 'text/plain',
                'cookie': c,
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
            }
            db = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_pwd, decode_responses=True, db=12,
                                   health_check_interval=30)
            process_count = multiprocessing.cpu_count()
            pool = Pool(process_count)
            while True:
                pool.apply_async(self.handle_tasks, (self.get_rcs_data, headers))
                pool.apply_async(self.handle_tasks, (self.get_hors_data, headers))
                pool.apply_async(self.handle_tasks, (self.get_del_data, headers))
                if db.scard('rcsIDSet') == 0 and db.scard('horsIDSet') == 0 and db.scard(
                        'delIDSet') == 0:
                    print(f'{wd} finished')
                    # 将处理完成的搜索值放到队列中，防止重复请求
                    db.sadd('succeeded_wd', wd)
                    pool.close()
                    pool.join()
                    print('The browser is closing...')
                    # driver.quit()
                    break
        except Exception as e:
            save_log(e, '../../logs/french.log')

    def handle_response(self, resp, detail_url):
        content = resp.content.decode()
        dic = json.loads(content)
        items = dic['items']
        for item in items:
            cid = item['id']  # 公司id
            numeroDossier = item['numeroDossier']  # 编号
            name = item['libelleEntreprise']['denomination']  # 公司名称
            siren = str(item['siren'])  # 警号
            nic = item['nic']
            if nic:
                siret = siren + nic  # 注册编码
            else:
                siret = ''
            lignes = item['adresse']['lignes']  # 地址
            postcode = item['adresse']['codePostal']  # 邮编
            city = item['adresse']['bureauDistributeur']
            register_address = ' '.join(lignes) + ' ' + city
            sourceDonnees = item['sourceDonnees']  # 数据来源
            struck_off_date = item['dateRadiation'] if item['dateRadiation'] else ''  # 罢工时间
            startup_type = item['typeEtab']  # 启动类型
            activity = {}
            activity['NAF'] = item['activite']['codeNAF']  # 法国产业分类代码
            activity['NAFName'] = item['activite']['libelleNAF']
            status = item['etatSurveillance']  # 公司状态
            # 将法语转成英文
            translationTable = str.maketrans("éàèùâêîôûç", "eaeuaeiouc")
            t = name.translate(translationTable).lower()
            res = re.sub(r'\W', '-', t)
            detail = detail_url.format(siren, res, numeroDossier)
            meta = (cid, name, detail, siren, siret, postcode, register_address, sourceDonnees, struck_off_date,
                    startup_type, str(activity), status)
            self.handle_detail(detail, meta)
        print('inserted 100')

    def handle_detail(self, detail_url, meta):
        resp = requests.get(detail_url, headers=self.headers).content.decode()
        emt = etree.HTML(resp)
        # 根据detail_url判断类型
        if 'EXTRAIT' in detail_url:
            # 注册日期
            text = emt.xpath('//div[@datapath="dateImmatriculationRegistre"]/p/text()')[0]
            register_time = re.search(r'\d{1,2}/\d{1,2}/\d{4}', text).group()
            # 法律形式
            legal_form = \
                emt.xpath(
                    '//div[@datapath="entreprise.personneMorale.identification.formeJuridique.libelle"]/p//text()')[0]
        else:
            register_time = emt.xpath(
                '//div[@id="showHideContent"]/div[@class="showHideSwitched"]/div[contains(@class,"horsRcs")]/table/tr/td[@class="last"]/div/p[1]/span[2]/text()')[
                0]
            legal_form = emt.xpath(
                '//div[@id="showHideContent"]/div[@class="showHideSwitched"]/div[contains(@class,"horsRcs")]/table/tr/td[@class="first"]/div/p[5]/*//text()')[
                1]

        meta = list(meta)
        meta.extend((register_time, legal_form))
        # 将数据插入mysql
        table_name = 'french_company_as_siren'
        fields = ('cid', 'name', 'detail_url', 'siren', 'siret', 'postcode', 'register_address', 'datasource',
                  'struck_off_date', 'startup_type', 'activity', 'status', 'register_time', 'legal_form')
        self.db.lpush('queue:french_data', str(tuple(meta)))
        if self.db.llen("queue:french_data") >= 100:
            for i in range(100):
                self.pipeline.brpop('queue:french_data', timeout=20)
            result = self.pipeline.execute()
            datas = [eval(i[1]) for i in result]
            try:
                self.conn.ping()
                cur = self.conn.cursor()
                sql = insert_sql(table_name, fields, datas)
                cur.execute(sql)
                self.conn.commit()
                print('inserted')
            except Exception as e:
                save_log(e, '../../logs/french.log')
                save_log('insert failed','../../logs/french.log')

    def get_rcs_data(self, headers):
        rcs_ids = self.db.spop('rcsIDSet', 100)
        resp = requests.post(self.activ_url, headers=headers,
                             data=str(rcs_ids).replace(' ', ''))
        # 如果id更新，则清空idSet，重新请求
        print(resp.content.decode())
        # if not json.loads(content)['items']:
        #     self.run()
        self.handle_response(resp, self.activ_del_detail_url)

    def get_hors_data(self, headers):
        # 请求未注册的数据
        hors_ids = self.db.spop('horsIDSet', 100)
        resp = requests.post(self.hors_url, headers=headers,
                             data=str(hors_ids).replace(' ', ''))
        print(resp.content.decode())
        self.handle_response(resp, self.hors_detail_url)

    def get_del_data(self, headers):
        del_ids = self.db.spop('delIDSet', 100)
        resp = requests.post(self.del_url, headers=headers,
                             data=str(del_ids).replace(' ', ''))
        print(resp.content.decode())
        self.handle_response(resp, self.activ_del_detail_url)

    def handle_tasks(self, func, headers):
        # 每个子进程分配一个新的loop
        loop = asyncio.get_event_loop()
        # 初始化业务类，转成task或future
        loop.run_until_complete(func(headers))

    def run(self):
        for wd in self.string_temp[1:]:
            # 判断该值是否已经搜索过
            db = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_pwd, decode_responses=True, db=12,
                                   health_check_interval=30)
            if not db.sismember('succeeded_wd', wd):
                # 重新导入队列id数据
                db.delete('rcsIDSet')
                db.delete('horsIDSet')
                db.delete('delIDSet')
                self.getIDs(wd)
                self.handle_request(wd)
            else:
                print(f'{wd} already requested！')
            break

if __name__ == '__main__':
    # Entry point: crawl infogreffe for each configured search keyword.
    FrenchSpider().run()
