import ast
import json
import random
import time
import traceback
from concurrent.futures import ProcessPoolExecutor

import pymysql
import redis
import requests
from lxml import etree

from configs.cfg import *
from tools.insert_update import insert_sql
from tools.ua import user_agent_list


class RussiaSpider:
    """Scrape company records from companies.rbc.ru into Redis, then bulk-insert into MySQL.

    The site only exposes the first 1000 results of any search, so the crawl
    recursively narrows the filters (category -> registration year -> region ->
    registered-capital band -> company status -> both revenue-growth sort
    orders) until each slice fits under the cap, then pages through the HTML
    results 20 cards at a time. Scraped rows are buffered in the Redis list
    ``q:russia_datas`` (serialized with ``str(tuple)``); ``insert_data`` runs
    as a separate consumer that drains the list in batches of 1000.
    """

    # Shared MySQL connection/cursor for the insert side.
    conn = pymysql.connect(host=mysql_host, user=mysql_name, port=mysql_port, password=mysql_pwd,
                           database='spiderdb', charset='utf8')
    cur = conn.cursor()
    # Redis queue buffering scraped rows between crawler and inserter.
    db = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_pwd, decode_responses=True,
                           db=14, health_check_interval=30, retry_on_timeout=True)
    pipeline = db.pipeline()
    # Filter-params API url templates, from the broadest query down to the
    # fully-qualified (capital + category + region + year + status) slice.
    base_url = 'https://companies.rbc.ru/api/web/v1/search/filter_params/?query='
    category_url = 'https://companies.rbc.ru/api/web/v1/search/filter_params/?category_id={}&query='
    category_time_url = 'https://companies.rbc.ru/api/web/v1/search/filter_params/?category_id={}&query=&registration_date_from={}&registration_date_to={}'
    category_adr_url = 'https://companies.rbc.ru/api/web/v1/search/filter_params/?category_id={}&query=&region_id[]={}&registration_date_from={}&registration_date_to={}'
    category_capital_url = 'https://companies.rbc.ru/api/web/v1/search/filter_params/?capital_from={}&capital_to={}&category_id={}&query=&region_id[]={}&registration_date_from={}&registration_date_to={}'
    final_url = 'https://companies.rbc.ru/api/web/v1/search/filter_params/?capital_from={}&capital_to={}&category_id={}&query=&region_id[]={}&registration_date_from={}&registration_date_to={}&status[]={}'
    # HTML search urls sorted by revenue growth rate, descending / ascending.
    # Scraping both orders reaches items beyond the 1000-result cap.
    un_growth_url = 'https://companies.rbc.ru/search/?capital_from={}&capital_to={}&category_id={}&query=&region_id[]={}&registration_date_from={}&registration_date_to={}&sorting=-revenue_growth_rate&status[]={}'
    growth_url = 'https://companies.rbc.ru/search/?capital_from={}&capital_to={}&category_id={}&query=&region_id[]={}&registration_date_from={}&registration_date_to={}&sorting=revenue_growth_rate&status[]={}'
    # NOTE(review): the user agent is chosen once at class-definition time, so
    # every request in a process shares the same UA.
    headers = {
        'user-agent': random.choice(user_agent_list)
    }
    # Company registration statuses accepted by the ``status[]`` filter.
    status_code = ['ACTIVE', 'LIQUIDATED', 'LIQUIDATING', 'INVALIDATING', 'REORGANIZING']

    # Region ids accepted by the ``region_id[]`` filter.
    address_code = ['77', '78', '66', '54', '50', '74', '16', '63', '23', '52', '02', '36', '61', '59', '25', '24',
                    '22', '55', '76', '38', '35', '31', '18', '39', '37', '34', '26', '64', '42', '67', '43', '27',
                    '72', '71', '62', '70', '56', '73', '58', '46', '69', '48', '33', '21', '32', '47', '86', '91',
                    '68', '51', '40', '05', '12', '10', '57', '60', '29', '28', '30', '11', '13', '44', '53', '45',
                    '03', '20', '07', '65', '14', '75', '92', '19', '15', '06', '04', '41', '09', '01', '89', '08',
                    '49', '79', '17', '87', '83']
    # Registration-year range crawled, in one-year slices [y, y+1].
    start_time = 1990
    end_time = 2022
    # Registered-capital bands, in roubles.
    max_capital = 1000000000
    capital_step = 100000
    # Categories 592 and 608 appear more than once in the category tree;
    # these flags ensure each is crawled at most once per process.
    is_check_592 = False
    is_check_608 = False

    # Number of rows drained from Redis per MySQL insert batch.
    _BATCH_SIZE = 1000
    _API_PREFIX = 'https://companies.rbc.ru/api/web/v1/search/filter_params/'
    _SEARCH_PREFIX = 'https://companies.rbc.ru/search/'

    def handle_req(self):
        """Return the ids of all top-level categories."""
        resp = requests.get(self.base_url, headers=self.headers)
        c1_items = json.loads(resp.content.decode())['category']
        return [c1['id'] for c1 in c1_items]

    def _fetch_subcategory_ids(self, cid):
        """Return the child-category ids of ``cid`` (empty list for a leaf)."""
        resp = requests.get(self.category_url.format(cid), headers=self.headers)
        items = json.loads(resp.content.decode())['category']
        return [c2['id'] for c2 in items] if items else []

    def handle_task(self, ids):
        """Recursively descend the category tree; leaf categories go to the crawl.

        NOTE(review): child subtrees are submitted to a ProcessPoolExecutor but
        the returned futures are never awaited, so failures in worker processes
        are silent — confirm this is acceptable for this crawl.
        """
        with ProcessPoolExecutor(max_workers=10) as pool:
            for cid in ids:
                if cid == 592:
                    # deduplicated: 592 occurs more than once in the tree
                    self.handle_592(cid)
                elif cid == 608:
                    self.handle_608(cid)
                else:
                    next_ids = self._fetch_subcategory_ids(cid)
                    if next_ids:
                        # has subcategories: recurse in a worker process
                        pool.submit(self.handle_task, next_ids)
                    else:
                        # leaf category: crawl it
                        self.handle_detail_by_time(cid)

    def _handle_once(self, cid, flag_name):
        """Crawl a duplicated category id at most once, guarded by a boolean flag."""
        if getattr(self, flag_name):
            print('Repeated requests')
        else:
            setattr(self, flag_name, True)
            next_ids = self._fetch_subcategory_ids(cid)
            if next_ids:
                self.handle_task(next_ids)

    def handle_592(self, cid):
        """Crawl category 592, skipping repeated requests."""
        self._handle_once(cid, 'is_check_592')

    def handle_608(self, cid):
        """Crawl category 608, skipping repeated requests."""
        self._handle_once(cid, 'is_check_608')

    def _to_search_url(self, api_url):
        """Convert a filter-params API url into the paged HTML search url."""
        return api_url.replace(self._API_PREFIX, self._SEARCH_PREFIX) + '&page={}'

    def handle_detail_by_time(self, cid):
        """Crawl leaf category ``cid``, splitting any slice that exceeds 1000 results.

        Narrowing order: registration year -> region -> capital band -> status.
        A slice still over the cap after all filters is scraped in both
        revenue-growth sort orders to reach items past the first 1000.
        """
        for register_time in range(self.start_time, self.end_time):
            time_url = self.category_time_url.format(cid, register_time, register_time + 1)
            time_count = self.handle_detail_by(time_url)
            if time_count > 1000:
                # too many for one year: split by region
                for code in self.address_code:
                    add_url = self.category_adr_url.format(cid, code, register_time, register_time + 1)
                    add_count = self.handle_detail_by(add_url)
                    if add_count > 1000:
                        # split by registered-capital band
                        for capital in range(0, self.max_capital, self.capital_step):
                            cap_url = self.category_capital_url.format(capital, capital + self.capital_step, cid, code,
                                                                       register_time, register_time + 1)
                            capital_count = self.handle_detail_by(cap_url)
                            if capital_count > 1000:
                                # split by company status
                                for status in self.status_code:
                                    status_url = self.final_url.format(capital, capital + self.capital_step, cid, code,
                                                                       register_time, register_time + 1,
                                                                       status)
                                    status_count = self.handle_detail_by(status_url)
                                    if status_count > 1000:
                                        # last resort: scrape both sort orders
                                        self.parse_detail(
                                            self.growth_url.format(capital, capital + self.capital_step, cid, code,
                                                                   register_time, register_time + 1,
                                                                   status) + '&page={}', status_count)
                                        self.parse_detail(
                                            self.un_growth_url.format(capital, capital + self.capital_step, cid, code,
                                                                      register_time, register_time + 1,
                                                                      status) + '&page={}', status_count)
                                    elif status_count > 0:
                                        self.parse_detail(self._to_search_url(status_url), status_count)
                            elif capital_count > 0:
                                self.parse_detail(self._to_search_url(cap_url), capital_count)
                    elif add_count > 0:
                        self.parse_detail(self._to_search_url(add_url), add_count)
            elif time_count > 0:
                self.parse_detail(self._to_search_url(time_url), time_count)

    def handle_detail_by(self, url):
        """Return the total result count reported by a filter-params API url."""
        resp = requests.get(url, headers=self.headers)
        return json.loads(resp.content.decode())['count']

    def parse_detail(self, url, count):
        """Page through an HTML search url and push every company card into Redis.

        ``url`` must end with a ``&page={}`` placeholder; ``count`` is the total
        number of matching companies, 20 cards per page.
        """
        total_page = (count + 19) // 20  # ceil(count / 20)
        # Fresh connection: this may run in a worker process, where the
        # class-level connection cannot be shared safely.
        db = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_pwd, decode_responses=True,
                               db=14, health_check_interval=30, retry_on_timeout=True)
        pipeline = db.pipeline()
        for page in range(1, total_page + 1):
            html = requests.get(url.format(page), headers=self.headers).content.decode()
            emt = etree.HTML(html)
            items = emt.xpath('//main[@class="company-detail-layout__content"]/div[contains(@class,"company-card")]')
            for item in items:
                cname = item.xpath('./a[@class="company-name-highlight"]/text()')[0].strip()
                detail_url = item.xpath('./a[@class="company-name-highlight"]/@href')[0]
                company_type = item.xpath('./a[@class="company-name-highlight"]/@title')[0]
                status = item.xpath('./span[contains(@class,"company-status-badge")]//text()')[0]
                ceo = item.xpath('./p[@class="company-card__info"][1]/text()')[0]
                register_address = item.xpath('./p[@class="company-card__info"][last()]/text()')[0]
                register_time = \
                    item.xpath('./div[@class="company-card__block"][1]/p[@class="company-card__info"][1]/text()')[0]
                # registered capital in roubles; digit-group whitespace removed
                capital = ''.join(
                    item.xpath('./div[@class="company-card__block"][1]/p[@class="company-card__info"][2]/text()')[
                        0].split())
                # taxpayer identification number
                tin = item.xpath('./div[@class="company-card__block"][2]/p[@class="company-card__info"][1]/text()')[0]
                # state registration number
                cin = item.xpath('./div[@class="company-card__block"][2]/p[@class="company-card__info"][2]/text()')[0]
                income = ''.join(
                    item.xpath('./div[@class="company-card__block"][3]/p[@class="company-card__info"][1]/text()')[
                        0].split())
                growth_rate = item.xpath(
                    './div[@class="company-card__block"][3]/p[@class="company-card__info"][2]/text()')[0].replace(',', '.')
                # serialize as str(tuple); insert_data reverses this with ast.literal_eval
                pipeline.lpush('q:russia_datas',
                               str((cin, cname, detail_url, company_type, ceo, tin, register_address, register_time,
                                    status, capital, income, growth_rate)))
            pipeline.execute()
            print(db.llen('q:russia_datas'))

    def insert_data(self):
        """Run forever, draining Redis in batches of 1000 rows into MySQL.

        Intended to run as a standalone consumer process alongside the crawl.
        """
        while True:
            try:
                if self.db.llen('q:russia_datas') < self._BATCH_SIZE:
                    # not enough rows yet: wait instead of busy-spinning the CPU
                    time.sleep(1)
                    continue
                for _ in range(self._BATCH_SIZE):
                    self.pipeline.brpop('q:russia_datas', timeout=30)
                result = self.pipeline.execute()
                # rows were serialized with str(tuple); literal_eval is its safe
                # inverse (eval would execute arbitrary queue content)
                datas = [ast.literal_eval(i[1]) for i in result if i]
                if datas:
                    sql = insert_sql('russia_company_info',
                                     ('cin', 'cname', 'detail_url', 'company_type', 'ceo', 'tin',
                                      'register_address', 'register_time', 'status', 'capital',
                                      'income', 'growth_rate'), datas)
                    self.conn.ping()  # reconnect if the MySQL link went stale
                    self.cur.execute(sql)
                    self.conn.commit()
                    print('inserted succeed')
            except Exception:
                traceback.print_exc()
                time.sleep(1)  # avoid a hot loop on a persistent failure

    def run(self):
        """Entry point for the crawl side: fetch top-level categories and descend."""
        ids = self.handle_req()
        self.handle_task(ids)


if __name__ == '__main__':
    # Run the consumer loop; uncomment run() to start the crawl side instead.
    crawler = RussiaSpider()
    # crawler.run()
    crawler.insert_data()
