#!/usr/bin/env python
# encoding: utf-8
"""
@author: youfeng
@email: youfeng243@163.com
@license: Apache Licence
@file: list_page_crawler.py
@time: 2017/12/27 21:42
"""
import copy
import re

from pyquery import PyQuery

from common import util
from common.table_manage import TableManage
from config.app_conf import SEARCH_LIST_FAIL, SEARCH_LIST_NOT_FOUND, SEARCH_LIST_SUCCESS, PROVINCE_HOST_DICT, \
    PROVINCE_CODE_DICT, SEARCH_LIST_HISTORY_NAME_FOUND
from ext.crawler_handler import CrawlerHandler
from model.crawl_model import STORE_TYPE_DETAIL
from model.http_error import HttpError


class ListPageCrawler(CrawlerHandler):
    """Crawler for GSXT company-search result (list) pages.

    Given a company name and a province to search from, this handler fetches
    the search-result list pages, parses each result entry with PyQuery,
    groups the entries by the province derived from each entry's registration
    code, persists per-province detail-page parameters and seed records, and
    (when detail crawling is requested) publishes detail-page download tasks
    to a beanstalk tube.

    NOTE(review): relies on helpers inherited from CrawlerHandler
    (request_retry, get_user_agent, compare_company, cal_province,
    is_company_invalid, save_seed_batch, save_seed_info, send_beanstalk,
    save_list_page_info, get_session) whose exact contracts are not visible
    in this file.
    """

    # Maximum size of the pending send queue
    MAX_QUEUE_SIZE = 50

    def __init__(self, search_flag,
                 download_tube,
                 store_tube,
                 log,
                 is_init_source_db=False,
                 is_init_target_db=False):
        # search_flag is the seed-table field name used to record list-search
        # status; "<search_flag>_time" records when that status was written.
        super(ListPageCrawler, self).__init__(store_tube, log,
                                              is_init_source_db=is_init_source_db,
                                              is_init_target_db=is_init_target_db)

        # Status flag field names (status + its timestamp companion)
        self.search_flag = search_flag
        self.search_flag_time = self.search_flag + '_time'

        # Beanstalk tube used to publish detail-page download tasks
        self.download_tube = download_tube

    # Request the first page of search results
    def request_list_page(self, session, company, host, search_province):
        """Fetch page 1 of the search list for *company* via *host*.

        :param session: requests-like session whose headers are overwritten
            here with browser-like values for the target host
        :param company: search keyword (company name)
        :param host: GSXT site host to query
        :param search_province: province key used to look up the numeric
            province code appended to the search URL
        :return: the response object, or None when the server reports the
            page as not found (HttpError.NOT_FOUND)
        """
        province_code = PROVINCE_CODE_DICT[search_province]
        first_url = 'http://{}/corp-query-search-advancetest.html?searchword={}&tab=ent_tab&cStatus=0&eYear=0&area=0&filter=0&province={}'.format(
            host, company, province_code)

        session.headers = {
            'Host': host,
            'User-Agent': self.get_user_agent(),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Accept-Encoding': 'gzip, deflate',
            'Referer': 'http://{}/corp-query-search-1.html'.format(host),
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',

        }
        resp, error = self.request_retry(session, host, session.get, first_url)
        if error == HttpError.NOT_FOUND:
            self.log.error("当前属性不存在: company = {} url = {}".format(company, first_url))
            return None

        return resp

    # Parse the HTML of one result page
    def parse_html_text(self, param_list_dict, text):
        """Parse one search-result page and append entries to *param_list_dict*.

        Mutates *param_list_dict* in place: keys are province codes computed
        by self.cal_province from each entry's registration code, values are
        lists of parameter dicts ({'_id': company, 'href': ..., plus optional
        'status', 'register_code', 'register_date', 'history_name'}).

        Each result entry is an <a> element; entries without a non-empty
        company name in their <h1> are skipped.
        """
        jq = PyQuery(text, parser='html')
        a_list = jq.find('a').items()
        for a_item in a_list:
            href = a_item.attr('href')
            company = a_item.find('h1').text()
            if not isinstance(company, basestring):
                continue

            # Company name
            company = company.strip()
            if company == '':
                continue

            # Strip embedded spaces from the name
            company = company.replace(' ', '')

            param = {'_id': company,
                     'href': href}

            # Company registration status
            status = a_item.find('.wrap-corpStatus').text()
            if isinstance(status, basestring) and status.strip() != '':
                status = status.strip().replace(' ', '')
                param['status'] = status

            # Unified social credit code
            register_code = a_item.find('.div-map2').find('.g3').text()
            if isinstance(register_code, basestring) and register_code.strip() != '':
                register_code = register_code.strip().replace(' ', '')
                param['register_code'] = register_code

            # Registration date
            register_date = a_item.find('.div-info-circle2').find('.g3').text()
            if isinstance(register_date, basestring) and register_date.strip() != '':
                register_date = register_date.strip().replace(' ', '')
                param['register_date'] = register_date

            # Historical (former) company name
            history_name = a_item.find('.div-info-circle3').find('.g3').text()
            if isinstance(history_name, basestring) and history_name.strip() != '':
                history_name = history_name.strip().replace(' ', '')
                param['history_name'] = history_name

            # Derive the province from the registration code and group by it.
            # NOTE(review): if register_code was not found above, the raw
            # (possibly non-string) PyQuery text() result is passed to
            # cal_province — presumed to be handled there; confirm.
            province = self.cal_province(register_code)
            if province in param_list_dict:
                param_list_dict[province].append(param)
            else:
                param_list_dict[province] = [param]

    # Parse the first search-result page
    def parse_first_list_page(self, search_key, text, search_province):
        """Parse page 1 of the search results.

        :param search_key: the company name that was searched (for logging)
        :param text: raw HTML of the first result page
        :param search_province: province searched from (for logging)
        :return: (param_list_dict, status, total_page) where
            - param_list_dict maps province -> list of entry param dicts,
            - status is one of SEARCH_LIST_NOT_FOUND / SEARCH_LIST_FAIL /
              SEARCH_LIST_SUCCESS,
            - total_page is the last page number found in the pagination
              links (0 when there is a single page or pagination parsing
              fails).
        """

        # Detail-page parameter table; table name is tied to the province,
        # e.g. new_gansu_search_list
        # 'param_table': {
        #     'table': 'new_gsxt_search_list',
        # },
        #
        # List-page result storage; table name is tied to the province,
        # e.g. new_gansu_search_page
        # 'list_page_table': {
        #     'table': 'new_gsxt_search_page',
        # },

        # Parameter store keyed by province; the national site's province
        # key is: gsxt
        param_list_dict = {}
        total_page = 0

        jq = PyQuery(text, parser='html')

        try:
            # Read the number of results reported by the page
            count = int(jq.find('.search_result_span1').text().strip())
            if count <= 0:
                return param_list_dict, SEARCH_LIST_NOT_FOUND, total_page

        except Exception as e:
            self.log.error("解析列表页个数失败: company = {} province = {}".format(search_key, search_province))
            self.log.exception(e)
            return param_list_dict, SEARCH_LIST_FAIL, total_page

        # More than 10 results means the page is paginated (10 per page);
        # extract the highest page number from the turnOverPage() links.
        if count > 10:
            try:
                pattern = '<a href="javascript:turnOverPage\((.*?)\)">.*?</a>'
                regex = re.compile(pattern)
                search_list = regex.findall(text)
                if len(search_list) <= 0:
                    total_page = 0
                else:
                    total_page = int(search_list[-1])
            except Exception as e:
                self.log.error("解析页码数目失败: ")
                self.log.exception(e)
                total_page = 0

        # Parse the entries on this page
        self.parse_html_text(param_list_dict, text)
        return param_list_dict, SEARCH_LIST_SUCCESS if len(param_list_dict) > 0 else SEARCH_LIST_FAIL, total_page

    # Persist the parsed results
    def store_parse_result(self, origin_company, host, param_list_dict, seed_table):
        """Store parsed entries per province and build the download task list.

        For each parsed entry:
        - an exact name match (compare_company == 0) marks the search
          SEARCH_LIST_SUCCESS and routes its task to *seed_table*;
        - otherwise, if the searched name appears in the entry's historical
          names (and no exact match was found yet), the search is marked
          SEARCH_LIST_HISTORY_NAME_FOUND and the newest name is recorded
          against the original seed;
        - all other entries become new seeds in their own province's seed
          table.

        Detail-page params go to the province's param table; extra seeds go
        to the province's seed table.

        :return: (origin_province, status, beanstalk_list) where
            origin_province is the province of the matched entry (None if no
            match) and beanstalk_list holds the detail-page download tasks.
        """
        status = SEARCH_LIST_NOT_FOUND
        beanstalk_list = []
        origin_province = None
        for province, item_list in param_list_dict.iteritems():
            # self.log.info("当前需要存储的省份: company = {} province = {}".format(origin_company, province))

            param_list = []
            seed_list = []
            for item in item_list:
                company = item['_id']
                href = item['href']
                history_name = item.get('history_name')
                param = copy.deepcopy(item)
                # The link must be stored; if later validation shows links
                # expire, remove them all at once.
                # param['base_url'] = 'http://{}{}'.format(host, href)
                param.pop('href')
                param['_in_time'] = util.get_now_time()
                param_list.append(param)

                beanstalk_item = {
                    'company': company,
                    'href': href,
                    # This host is not tied to the province: e.g.
                    # gs.gsxt.gov.cn can return Anhui detail pages because
                    # the search covers the whole country.
                    'host': host,  # Latest tests show host need not be pinned; all 32 sites accept a URL built from href
                    'province': province,
                }

                # Exact name match, or a match via historical names
                if self.compare_company(origin_company, company) == 0:
                    status = SEARCH_LIST_SUCCESS
                    origin_province = province
                    beanstalk_item['seed_table'] = seed_table
                    beanstalk_item['company'] = origin_company
                elif status != SEARCH_LIST_SUCCESS and \
                        isinstance(history_name, basestring) and \
                                origin_company in history_name:
                    status = SEARCH_LIST_HISTORY_NAME_FOUND
                    origin_province = province
                    beanstalk_item['seed_table'] = seed_table
                    beanstalk_item['company'] = origin_company

                    # Record the company's newest name against the seed
                    seed_list.append({
                        '_id': origin_company,
                        'newest_name': company
                    })
                else:
                    seed_list.append({
                        '_id': company,
                        '_in_time': util.get_now_time(),
                        self.search_flag: SEARCH_LIST_SUCCESS,
                        self.search_flag_time: util.get_now_time(),
                    })
                    beanstalk_item['seed_table'] = TableManage.get_seed_table(province)
                beanstalk_list.append(beanstalk_item)

            # Store detail-page parameters for this province
            self.save_seed_batch(TableManage.get_param_table(province), param_list)

            # Store the extra seed records for this province
            self.save_seed_batch(TableManage.get_seed_table(province), seed_list)

        return origin_province, status, beanstalk_list

    # Crawl the remaining pages
    def request_turn_page(self, session, company, host, search_province, param_list_dict, page_list, total_page):
        """Fetch and parse pages 2..total_page of the search results.

        Appends parsed entries into *param_list_dict* and each page's raw
        HTML into *page_list* (both mutated in place). Pages that fail to
        download or return NOT_FOUND are logged and skipped.
        """

        province_code = PROVINCE_CODE_DICT[search_province]

        # Page-turning loop
        for page in xrange(2, total_page + 1):
            url = 'http://{}/corp-query-search-advancetest.html?area=0&filter=0&tab=ent_tab&province={}&searchword={}&eYear=0&cStatus=0&page={}'.format(
                host, province_code, company, page)
            resp, error = self.request_retry(session, host, session.get, url)
            if resp is None:
                continue

            if error == HttpError.NOT_FOUND:
                self.log.error("当前页面不存在，跳过当前页: page = {} url = {}".format(page, url))
                continue

            # Parse this page's entries
            self.parse_html_text(param_list_dict, resp.text)
            page_list.append(resp.text)

    # The actual crawl
    def crawl_list_page(self, session, company, host, search_province, seed_table, crawl_field):
        """Crawl all list pages for *company* and persist the results.

        Flow: validate the company name, fetch and parse page 1, fetch the
        remaining pages, store the parsed results, optionally publish
        detail-page tasks (when crawl_field == STORE_TYPE_DETAIL), record
        the search status on the seed, and — when anything was found —
        store the raw list pages.

        :return: (status, origin_province, total_beanstalk_count)
        """
        status = SEARCH_LIST_FAIL
        origin_province = None
        total_beanstalk_count = 0

        # Reject invalid company names up front
        if self.is_company_invalid(company):
            self.save_seed_info(seed_table, {
                '_id': company,
                self.search_flag: SEARCH_LIST_NOT_FOUND,
                self.search_flag_time: util.get_now_time(),
            })
            return SEARCH_LIST_NOT_FOUND, origin_province, total_beanstalk_count

        # Fetch the first page
        resp = self.request_list_page(session, company, host, search_province)
        if resp is None:
            self.log.error("搜索列表页第一页失败: search_province = {} host = {} company = {}".format(
                search_province, host, company))
            return status, origin_province, total_beanstalk_count

        # Parse the first page
        param_list_dict, status, total_page = self.parse_first_list_page(company, resp.text, search_province)
        # Nothing found at all: tag the seed and exit
        if status == SEARCH_LIST_NOT_FOUND:
            self.save_seed_info(seed_table, {
                '_id': company,
                self.search_flag: status,
                self.search_flag_time: util.get_now_time(),
            })
            return status, origin_province, total_beanstalk_count

        # Parsing failed: store nothing
        if status == SEARCH_LIST_FAIL:
            self.log.error("解析失败: search_province = {} host = {} company = {}".format(
                search_province, host, company))
            return status, origin_province, total_beanstalk_count

        # Collect the raw HTML of every list page
        page_list = [resp.text]

        # Fetch the remaining pages
        self.request_turn_page(session, company, host, search_province, param_list_dict, page_list, total_page)

        # Fetch cookies
        # cookie_dict = self.get_cookie_dict(session)

        # Persist the crawl results
        origin_province, status, beanstalk_list = self.store_parse_result(company, host, param_list_dict, seed_table)

        # Publish detail-page download tasks only when detail crawling is on
        if crawl_field == STORE_TYPE_DETAIL:
            self.send_beanstalk(self.download_tube, beanstalk_list)
            self.log.info("消息队列发送成功: company:{}".format(company))

        # Record the search status on the company's seed
        self.save_seed_info(seed_table, {
            '_id': company,
            self.search_flag: status,
            self.search_flag_time: util.get_now_time(),
        })

        # If any download task was produced, the page had data — store the
        # raw list pages for this search.
        total_beanstalk_count = len(beanstalk_list)
        if total_beanstalk_count > 0:
            # Store the crawled list pages
            self.save_list_page_info(TableManage.get_search_page_table(search_province), {
                '_id': company,
                'text': page_list,
                '_in_time': util.get_now_time(),
            })
            # self.log.info("搜索的企业数目: search_province = {} host = {} count = {} company = {}".format(
            #     self.search_province, host, total_beanstalk_count, company))

        return status, origin_province, total_beanstalk_count

    # Crawl entry point for list pages
    def crawl_process(self, process_id, company, search_province, seed_table, crawl_field):
        """Crawl entry point.

        :param crawl_field: crawl scope — list pages only, or detail pages too
        :param process_id: worker process id (for logging)
        :param seed_table: table the seed came from
        :param search_province: province to search from
        :param company: company name to search for
        :return: (status, company)
        """

        # Safety net: with no seed table given, fall back to the province's
        # default shared seed table.
        if seed_table is None:
            seed_table = TableManage.get_seed_table(search_province)

        host = PROVINCE_HOST_DICT[search_province]
        self.log.info("开始抓取企业: process_id:{} search_province:{} host:{} company:{}".format(
            process_id, search_province, host, company))

        session = self.get_session(host)
        try:
            status, origin_province, count = self.crawl_list_page(
                session, company, host, search_province, seed_table, crawl_field)
        except Exception as e:
            status = SEARCH_LIST_FAIL
            origin_province = None
            count = 0
            self.log.error("请求列表页错误: process_id:{} company:{} host:{} province:{}".format(
                process_id, company, host, search_province))
            self.log.exception(e)

        if origin_province is None:
            self.log.info("抓取状态: process_id:{} search_province:{} host:{} status:{} count:{} company:{}".format(
                process_id, search_province, host, status, count, company))
        else:
            self.log.info(
                "抓取状态: process_id:{} search_province:{} host:{} status:{} count:{} origin_province:{} company:{}".format(
                    process_id, search_province, host, status, count, origin_province, company))

        return status, company
