#!/usr/bin/env python
# encoding: utf-8
"""
@author: youfeng
@email: youfeng243@163.com
@license: Apache Licence
@file: list_page_crawler.py
@time: 2017/12/27 21:42
"""
import copy
import re
import time

import requests
from pyquery import PyQuery

from common import util
from common.util import get_time_stamp, json_loads, create_nine_num
from config.app_conf import SEARCH_LIST_FAIL, SEARCH_LIST_NOT_FOUND, SEARCH_LIST_SUCCESS, PROVINCE_CODE_DICT, \
    SEARCH_LIST_HISTORY_NAME_FOUND, GEETEST_BREAK_URL, GEETEST_ACCOUNT, HOME_PAGE_URL, LIST_URL, \
    CAPTCHA_URL, CRAWL_TABLE_CONF, SEARCH_LIST_INVALID
from crawler.detail_page_crawler import DetailPageCrawler
from ext.crawler_handler import CrawlerHandler
from ext.data_wrap import DataWrap
from ext.redis_result_handler import RedisResultHandler
from model.http_error import HttpError


class ListPageCrawler(CrawlerHandler):
    """List-page crawler for the national enterprise-credit search site.

    Searches a company name on www.gsxt.gov.cn (breaking the geetest
    captcha first), parses the search-result list, stores the matched
    company into the seed table and dispatches a detail-page crawl for
    the hit.
    """

    # Maximum size of the pending-send queue
    MAX_QUEUE_SIZE = 50

    def __init__(self, search_flag,
                 store_tube,
                 proxy_type,
                 log):
        super(ListPageCrawler, self).__init__(store_tube, proxy_type, log)

        # Redis handler used to publish crawl results
        self.__redis_result_handler = RedisResultHandler(log)
        # Detail-page crawler fed with the hits found on the list page
        self.detail_crawler = DetailPageCrawler(parse_flag=CRAWL_TABLE_CONF['result_table']['parse_flag'],
                                                crawl_flag=CRAWL_TABLE_CONF['seed_table']['crawl_flag'],
                                                store_tube=store_tube,
                                                proxy_type=proxy_type,
                                                redis_result_handler=self.__redis_result_handler,
                                                log=log)

        # Status-flag field names used when tagging seeds/results
        self.search_flag = search_flag
        self.search_flag_time = self.search_flag + '_time'

    def request_list_page(self, session, company, host, search_province):
        """Request the search list page for *company* within one province.

        :return: the response object, or None when the page does not exist.
        """
        province_code = PROVINCE_CODE_DICT[search_province]
        first_url = 'http://{}/corp-query-search-advancetest.html?searchword={}&tab=ent_tab&cStatus=0&eYear=0&area=0&filter=0&province={}'.format(
            host, company, province_code)

        headers = {
            'Host': host,
            'User-Agent': self.get_user_agent(),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Accept-Encoding': 'gzip, deflate',
            'Upgrade-Insecure-Requests': "1",
            'Referer': 'http://{}/corp-query-search-1.html'.format(host),
            'Connection': 'keep-alive',
        }

        session.headers.update(headers)

        resp, error = self.request_retry(session, host, session.get, first_url)
        if error == HttpError.NOT_FOUND:
            self.log.error("当前属性不存在: company = {} url = {}".format(company, first_url))
            return None

        return resp

    def __parse_result_item(self, param_list_dict, a_item, status_selector):
        """Parse one result anchor into a param dict, bucketed by province.

        Shared by the two list-page layouts, which differ only in the CSS
        selectors used (see the callers).

        :param status_selector: CSS selector of the company-status node.
        """
        href = a_item.attr('href')
        company = a_item.find('h1').text()
        if not isinstance(company, basestring):
            return

        # Company name: skip empty entries
        company = company.strip()
        if company == '':
            return

        # Drop inner spaces from the name
        company = company.replace(' ', '')

        param = {'_id': company,
                 'href': href}

        # Company status
        status = a_item.find(status_selector).text()
        if isinstance(status, basestring) and status.strip() != '':
            param['status'] = status.strip().replace(' ', '')

        # Unified social credit code (also used to derive the province)
        register_code = a_item.find('.div-map2').find('.g3').text()
        if isinstance(register_code, basestring) and register_code.strip() != '':
            register_code = register_code.strip().replace(' ', '')
            param['register_code'] = register_code

        # Registration date
        register_date = a_item.find('.div-info-circle2').find('.g3').text()
        if isinstance(register_date, basestring) and register_date.strip() != '':
            param['register_date'] = register_date.strip().replace(' ', '')

        # Historical names
        history_name = a_item.find('.div-info-circle3').find('.g3').text()
        if isinstance(history_name, basestring) and history_name.strip() != '':
            param['history_name'] = history_name.strip().replace(' ', '')

        # Bucket the entry by the province derived from the credit code
        province = self.cal_province(register_code)
        param_list_dict.setdefault(province, []).append(param)

    def parse_base_html_text(self, param_list_dict, text):
        """Parse the current list-page layout ('.search_list_item.db' items)."""
        jq = PyQuery(text, parser='html')
        for a_item in jq.find('.search_list_item.db').items():
            self.__parse_result_item(param_list_dict, a_item, '.ying_satus2.ml10')

    def parse_html_text(self, param_list_dict, text):
        """Parse the legacy list-page layout (plain <a> items)."""
        jq = PyQuery(text, parser='html')
        for a_item in jq.find('a').items():
            self.__parse_result_item(param_list_dict, a_item, '.wrap-corpStatus')

    def parse_first_list_page(self, search_key, text):
        """Parse the first search-result page.

        :return: (param_list_dict, status, total_page) where
            param_list_dict maps province code (or 'gsxt' for the head
            office) to a list of parsed company params.
        """
        param_list_dict = {}
        total_page = 0

        jq = PyQuery(text, parser='html')

        try:
            # Number of search hits reported by the page
            count = int(jq.find('.search_result_span1').text().strip())
            if count <= 0:
                return param_list_dict, SEARCH_LIST_NOT_FOUND, total_page

        except Exception as e:
            self.log.error("解析列表页个数失败: company = {} ".format(search_key))
            self.log.exception(e)
            self.log.error("解析企业数目失败: {}".format(text))
            return param_list_dict, SEARCH_LIST_FAIL, total_page

        # More than one page (10 items per page): the last pager link
        # carries the total page count
        if count > 10:
            try:
                regex = re.compile(r'<a href="javascript:turnOverPage\((.*?)\)">.*?</a>')
                search_list = regex.findall(text)
                total_page = int(search_list[-1]) if search_list else 0
            except Exception as e:
                self.log.error("解析页码数目失败: ")
                self.log.exception(e)
                total_page = 0

        # Parse the html body itself
        self.parse_base_html_text(param_list_dict, text)
        return param_list_dict, SEARCH_LIST_SUCCESS if len(param_list_dict) > 0 else SEARCH_LIST_FAIL, total_page

    def __is_history_name(self, history_name, origin_company):
        """Return True when *origin_company* matches one of the historical names."""
        if not isinstance(history_name, basestring):
            return False

        if self.compare_company(history_name, origin_company) == 0:
            return True

        # Historical names are joined with a full-width semicolon
        name_list = history_name.strip().replace(" ", "").split("；")
        for name in name_list:
            if self.compare_company(name, origin_company) == 0:
                return True

        return False

    def __save_matched_seed(self, seed_table, company, href, province):
        """Store a matched company name into the seed table, flagged as found."""
        self.save_param_batch(seed_table, [{
            '_id': company,
            'href': href,
            '_in_time': util.get_now_time(),
            'origin_province': province,
            self.search_flag: SEARCH_LIST_SUCCESS,
            self.search_flag_time: util.get_now_time(),
        }])

    def store_parse_result(self, origin_company, param_list_dict, seed_table):
        """Pick the first matching entry out of the parsed results and store it.

        An exact name match wins; otherwise a historical-name match is
        accepted (recording the company's newest name).

        :return: (origin_province, status, result_item)
        """
        status = SEARCH_LIST_NOT_FOUND
        origin_province = None
        origin_href = None
        newest_name = None
        company_set = set()
        is_find = False
        for province, item_list in param_list_dict.iteritems():

            for item in item_list:
                company = item['_id']

                # De-duplicate by name: only the first occurrence is considered
                if company in company_set:
                    continue

                company_set.add(company)

                href = item['href']
                history_name = item.get('history_name')

                # Exact match takes priority over a historical-name match
                if self.compare_company(origin_company, company) == 0:
                    status = SEARCH_LIST_SUCCESS
                    origin_province = province
                    origin_href = href

                    # Store the matched name into the current seed table
                    self.__save_matched_seed(seed_table, company, href, province)
                    is_find = True
                    break
                elif status != SEARCH_LIST_SUCCESS and self.__is_history_name(history_name, origin_company):
                    status = SEARCH_LIST_HISTORY_NAME_FOUND
                    origin_province = province
                    origin_href = href
                    newest_name = company

                    # Store the current (newest) name into the seed table
                    self.__save_matched_seed(seed_table, company, href, province)
                    is_find = True
                    break
            if is_find:
                break

        # Result item sent back to the dispatch list
        result_item = {
            '_id': origin_company,
            self.search_flag: status,
            self.search_flag_time: util.get_now_time(),
        }

        # Record where/what was found, when anything was
        if origin_province is not None:
            result_item['origin_province'] = origin_province
        if origin_href is not None:
            result_item['href'] = origin_href
        if newest_name is not None:
            result_item['newest_name'] = newest_name

        return origin_province, status, result_item

    def geetest_breaking_c2567(self, session, host, company):
        """Break the geetest captcha through the external breaking API.

        :param company: search keyword the session gets bound to
        :param session: unbroken session (no JSESSIONID / SECTOKEN yet)
        :param host: site host
        :return: list-page response on success (session now carries a
            JSESSIONID bound to the searchword), None on failure
        """
        # Fetch the captcha bootstrap parameters
        captcha_url = CAPTCHA_URL.format(get_time_stamp())

        resp, error = self.request_retry(session, host, session.get, captcha_url)
        if resp is None or error == HttpError.NOT_FOUND:
            self.log.error("极验破解获取 521 js解密失败:  company:{}".format(company))
            return None

        # Extract the parameters needed for verification
        json_text = json_loads(resp.content)
        if json_text is None:
            self.log.error("解析验证码入口参数失败: company:{} text:{}".format(
                company, resp.content))
            return None

        gt = json_text.get('gt')
        challenge = json_text.get('challenge')
        # Build the breaking-service request url
        break_url = GEETEST_BREAK_URL.format(gt=gt, challenge=challenge, host=host,
                                             user=GEETEST_ACCOUNT["USER"], passwd=GEETEST_ACCOUNT["PASSWORD"])

        try:
            resp = requests.get(break_url, timeout=60)
        except Exception as e:
            self.log.error("访问破解服务失败: company:{} break_url:{} ".format(
                company, break_url))
            self.log.exception(e)
            return None

        json_text = json_loads(resp.text)
        if json_text is None:
            self.log.error("极验破解返回信息解析异常: company:{} text:{}".format(
                company, resp.text))
            return None

        # Anything but 'ok' means the break failed; the caller retries
        break_status = json_text.get("status")
        if break_status != 'ok':
            self.log.error("破解状态错误: company:{} text:{}".format(
                company, resp.text))
            return None

        # Hit the test endpoint first (presumably binds the searchword
        # server side — response is intentionally ignored)
        test_url = 'http://{}/corp-query-search-test.html?searchword={}'.format(host, company)
        resp, error = self.request_retry(session, host, session.get, test_url, timeout=20)

        # Extract the validated captcha parameters
        challenge = json_text.get('challenge')
        validate = json_text.get('validate')

        token = create_nine_num()

        post_data = {
            'province': "",
            'geetest_challenge': challenge,
            'geetest_validate': validate,
            'geetest_seccode': u'{}|jordan'.format(validate),
            'searchword': company,
            'tab': 'ent_tab',
            'token': token,
        }

        headers = {
            'Referer': HOME_PAGE_URL.format(host),
            'Origin': 'http://{}'.format(host),
            'Content-Type': 'application/x-www-form-urlencoded',
            'Upgrade-Insecure-Requests': '1',
            'Host': host,
            'DNT': '1',
        }

        session.headers.update(headers)
        list_url = LIST_URL.format(host)

        resp, error = self.request_retry(session, host, session.post, list_url, data=post_data, timeout=20)
        if resp is not None and resp.status_code == 200:
            self.save_session(session)
            return resp

        self.log.error("极验破解后请求页面失败: company:{}".format(company))
        return None

    def crawl_list_page(self, company, host, seed_table):
        """Run one list-page crawl: break the captcha, fetch, parse, store.

        :return: (status, origin_province, result_item)
        """
        status = SEARCH_LIST_FAIL
        origin_province = None
        result_item = {
            '_id': company,
            self.search_flag: status,
            self.search_flag_time: util.get_now_time(),
        }

        # Reject obviously invalid company names up front
        if self.is_company_invalid(company):
            result_item = {
                '_id': company,
                self.search_flag: SEARCH_LIST_NOT_FOUND,
                self.search_flag_time: util.get_now_time(),
            }

            return SEARCH_LIST_NOT_FOUND, origin_province, result_item

        session = self.get_session(host)

        try:
            # Break the captcha, binding the JSESSIONID to the searchword
            self.log.info("开始极验破解: company = {}".format(company))
            resp = self.geetest_breaking_c2567(session, host, company)
            if resp is None:
                self.log.error("极验破解失败: status:{} company:{}".format(
                    SEARCH_LIST_FAIL, company))
                return status, origin_province, result_item

            if "您的访问行为被判定为DoS攻击，系统拒绝为您提供服务。" in resp.text:
                self.log.error("IP被封禁: 判定为DoS攻击 status:{} company:{}".format(
                    SEARCH_LIST_FAIL, company))
                return status, origin_province, result_item

            if "您的访问行为被判定为疑似网络爬虫，请于20分钟后再进行访问。" in resp.text:
                self.log.error("IP被封禁: 疑似网络爬虫 status:{} company:{}".format(
                    SEARCH_LIST_FAIL, company))
                return status, origin_province, result_item

        except Exception as e:
            self.log.error("极验破解异常: company = {}".format(company))
            self.log.exception(e)
            return status, origin_province, result_item

        if "您的操作过于频繁，涉嫌违法访问" in resp.text:
            self.log.error("IP被封禁: 违法访问 host = {} company = {}".format(
                host, company))
            return status, origin_province, result_item

        # Parse the page
        param_list_dict, status, total_page = self.parse_first_list_page(company, resp.text)

        # Parse failure: store nothing, the caller retries
        if status == SEARCH_LIST_FAIL:
            self.log.error("IP被封禁: 解析失败 host = {} company = {}".format(
                host, company))
            return status, origin_province, result_item

        self.log.info("列表页滑动验证码破解成功: company:{}".format(company))

        # Nothing found at all: tag the seed and bail out
        if status == SEARCH_LIST_NOT_FOUND:
            result_item = {
                '_id': company,
                self.search_flag: status,
                self.search_flag_time: util.get_now_time(),
            }
            return status, origin_province, result_item

        # Store the crawl result
        origin_province, status, result_item = self.store_parse_result(company,
                                                                       param_list_dict,
                                                                       seed_table)

        return status, origin_province, result_item

    def __process_crawl_status(self, seed_table, company, status, result_item):
        """Dispatch follow-up work according to the list-page crawl status."""
        if status == SEARCH_LIST_FAIL:
            self.log.info("采集失败: status:{} company:{}".format(status, company))
            return

        # No hit (or invalid name): publish an empty result immediately
        if status == SEARCH_LIST_NOT_FOUND or status == SEARCH_LIST_INVALID:
            self.__redis_result_handler.save(company, DataWrap.empty_wrap())
            return

        # A hit must carry a result item for the detail-page crawl
        if result_item is None:
            self.log.error("结果数据异常: seed_table = {} status = {} company = {}".format(seed_table, status, company))
            return

        href = result_item.get("href")
        origin_province = result_item.get("origin_province")
        seed_item = {
            'company': company,
            'href': href,
            # The host is not tied to the province: e.g. gs.gsxt.gov.cn can
            # return Anhui detail pages because the search is nationwide.
            'host': "www.gsxt.gov.cn",  # latest tests show all 32 sites accept urls built from the href, so no specific host is needed
            'province': origin_province,
            'seed_table': seed_table,
        }

        self.detail_crawler.crawl_process(1, seed_item)

    def crawl_process(self, process_id, company, seed_table):
        """Crawl entry point for one company.

        :param process_id: multi-process id (used only for logging)
        :param seed_table: seed source table
        :param company: company name to search for
        :return: final crawl status
        """
        start_time = time.time()
        result_item = None
        origin_province = None
        status = SEARCH_LIST_FAIL
        host = "www.gsxt.gov.cn"

        self.log.info("开始抓取企业: process_id:{} host:{} company:{}".format(
            process_id, host, company))

        seed_item = {
            '_id': company,
            '_in_time': util.get_now_time()
        }
        self.save_param_info(seed_table, seed_item)

        # Retry the whole list-page crawl up to 5 times on failure
        for _ in xrange(5):
            try:
                status, origin_province, result_item = self.crawl_list_page(
                    company, host, seed_table)
            except Exception as e:
                status = SEARCH_LIST_FAIL
                origin_province = None
                self.log.error("请求列表页错误: process_id:{} company:{} host:{}".format(
                    process_id, company, host))
                self.log.exception(e)
            if status == SEARCH_LIST_FAIL:
                continue

            break

        # Handle the different crawl statuses
        self.__process_crawl_status(seed_table, company, status, result_item)

        try:
            if result_item is not None:
                # Persist the seed/result info
                self.save_param_info(seed_table, result_item)

        except Exception as e:
            self.log.error("发送采集状态失败: process_id:{} status:{} table:{} company:{}".format(
                process_id, status, seed_table, company))
            self.log.exception(e)

        # BUG FIX: the format arguments were previously passed in the wrong
        # order (elapsed time was logged under "company", status under
        # "use_time"); they now match the placeholders.
        if origin_province is None:
            self.log.info("抓取状态: use_time:{}s status:{} seed_table:{} company:{}".format(
                time.time() - start_time, status, seed_table, company))
        else:
            self.log.info(
                "抓取状态: use_time:{}s status:{} seed_table:{} origin_province:{} company:{} ".format(
                    time.time() - start_time, status, seed_table, origin_province, company))

        return status
