#!/usr/bin/env python
# encoding: utf-8
"""
@author: youfeng
@email: youfeng243@163.com
@license: Apache Licence
@file: detail_page_crawler.py
@time: 2018/1/5 11:27
"""
import time

from cache.record_cache import RecordCache
from common import util
from common.table_manage import TableManage
from config.app_conf import CRAWL_DETAIL_FAIL, PROVINCE_HOST_DICT
from ext.crawler_handler import CrawlerHandler
from model.crawl_model import GsxtModel, AnnualModel, STORE_TYPE_LIST
from model.http_error import HttpError
from model.mq_model import MqModel
from model.primary_info import ANNUAL_TYPE_TO_LAB


class AnnualPageCrawler(CrawlerHandler):
    """Crawler for annual-report (年报) detail pages.

    For one (company, year) annual report this handler:
      1. resolves the per-section JSON endpoint urls from the report's
         index page,
      2. crawls every enabled section (with retry, proxy switching and
         server-side pagination),
      3. packs the raw page texts, the crawl status and the crawl cache
         into an MQ message and sends it to the store queue.
    """

    # Maps an annual-report section to the key(s) under which its url is
    # published in the index JSON. A plain string applies to every
    # annual_type; a dict selects the key by annual_type:
    #   "e"   - regular company  (e.g. 海南翱鸣贸易有限公司)
    #   "sfc" - farmer cooperative (e.g. 定安岭口华纵槟榔专业合作社)
    #   "pb"  - individual business (e.g. 武威市凉州区岔路口鑫隆商行)
    AR_URL_EXTRACT_DICT = {
        # Basic info (shares its url group with enterprise asset status)
        AnnualModel.BASE_INFO: {"e": "baseinfoUrl",
                                "sfc": "vannualSfcAssertUrl",
                                "pb": "vAnnPbAssetUrl"},
        # Website / web-shop info
        AnnualModel.WEBSITES_INFO: "allWebInfoUrl",
        # Administrative licensing info
        # NOTE: the "annul" misspelling is the remote API's key — keep it.
        AnnualModel.ADMINISTRATIVE_LICENSING_INFO: "annulLicenceUrl",
        # Shareholder contribution info
        AnnualModel.SHAREHOLDER_INFO: "sponsorUrl",
        # Outbound investment info
        AnnualModel.INVESTED_COMPANIES_INFO: "allForinvestmentInfoUrl",
        # Branch offices
        AnnualModel.BRANCH_INFO: {"e": "vAnnualReportBranchProductionUrl",
                                  "sfc": "vAnnualReportSfcBranchUrl"},
        # Enterprise asset status
        AnnualModel.ENTERPRISE_ASSET_STATUS_INFO: {"e": "baseinfoUrl",
                                                   "sfc": "vannualSfcAssertUrl",
                                                   "pb": "vAnnPbAssetUrl"},
        # Annual-report edit history
        AnnualModel.EDIT_CHANGE_INFO: "alterUrl",
        # Equity change records
        AnnualModel.EDIT_SHAREHOLDING_CHANGE_INFO: "vAnnualReportAlterstockinfoUrl",
        # Outbound guarantee list
        AnnualModel.OUT_GUARANTEE_INFO: "forGuaranteeinfoUrl",
    }

    def __init__(self, parse_flag,
                 crawl_flag,
                 store_tube,
                 is_init_target_db,
                 is_init_source_db, log):
        """
        :param parse_flag: page-model key that is reset to 0 so the parser
                           re-parses the stored pages
        :param crawl_flag: seed-table field that records the crawl status;
                           "<crawl_flag>_time" records the crawl timestamp
        :param store_tube: beanstalk tube the store messages are sent to
        :param is_init_target_db: whether the base class opens the target db
        :param is_init_source_db: whether the base class opens the source db
        :param log: logger instance
        """
        super(AnnualPageCrawler, self).__init__(store_tube, log,
                                                is_init_target_db=is_init_target_db,
                                                is_init_source_db=is_init_source_db)

        # key used to reset the parse state of the stored page model
        self.parse_flag = parse_flag

        # crawl status field and its companion timestamp field
        self.crawl_flag = crawl_flag
        self.crawl_flag_time = self.crawl_flag + '_time'

        self.log.info("年报抓取对象初始化完成...")

    def crawl_process(self, process_id, param_dict, record_cache_dict):
        """Entry point for one annual-report crawl task.

        Expected param_dict shape (produced by the scheduler):
            province, company, year, base_href, index_href,
            an_che_id, ent_type, annual_type

        :param process_id: worker id, for logging only
        :param param_dict: task parameters, see above
        :param record_cache_dict: per-section crawl-time cache; mutated by
                                  the section crawlers on success
        :return: crawl status code (e.g. CRAWL_DETAIL_FAIL on failure)
        """
        # web-page table is selected by province
        province = param_dict.get('province')
        # company name — the search_name field extracted on the list page
        company = param_dict.get('company')
        # report year
        year = param_dict.get("year")
        # annual-report record id
        an_che_id = param_dict.get('an_che_id')
        # url suffix of the basic-info page
        base_href = param_dict.get('base_href')
        # url suffix of the index page
        index_href = param_dict.get('index_href')
        # enterprise type
        ent_type = param_dict.get('ent_type')
        # annual-report type ("e" / "sfc" / "pb")
        annual_type = param_dict.get('annual_type')

        self.log.info("当前年报抓取: process_id:{} province:{} year:{} an_che_id:{} company:{} annual_type:{}".format(
            process_id, province, year, an_che_id, company, annual_type))

        # resolve the seed (source) table for this province
        annual_table = TableManage.get_annual_table(province)

        self.log.info("当前详情页抓取: process_id:{} province:{} company:{}".format(
            process_id, province, company))

        # crawl all annual-report sections; never let an exception escape
        # the worker loop — report it as a failed crawl instead
        try:
            status = self.crawl_field(company, province, year,
                                      an_che_id, ent_type, annual_type,
                                      base_href, index_href, record_cache_dict,
                                      annual_table)
        except Exception as e:
            status = CRAWL_DETAIL_FAIL
            self.log.error("年报抓取异常: process_id:{} province:{} year:{} an_che_id:{} company:{} annual_type:{}".format(
                process_id, province, year, an_che_id, company, annual_type))
            self.log.exception(e)

        self.log.info("年报抓取状态: process_id:{} province:{} year:{} status:{} an_che_id:{} company:{} "
                      "annual_type:{}".format(process_id, province, year, status, an_che_id, company, annual_type))

        return status

    def crawl_field(self, company, province, year, an_che_id, ent_type, annual_type,
                    base_href, index_href, record_cache_dict, annual_table):
        """Crawl every enabled section of one annual report and enqueue
        the result (page model + seed status + cache record) for storage.

        :return: crawl status code; CRAWL_DETAIL_FAIL when nothing at all
                 could be crawled (in which case nothing is stored)
        """
        # pick the province-specific host, falling back to the national one
        host = PROVINCE_HOST_DICT.get(province, PROVINCE_HOST_DICT['gsxt'])

        # fresh http session bound to this host
        session = self.get_session(host)

        # fetch the index JSON that lists every section's endpoint url
        ar_all_url_dict = self.get_annual_url_link(session, host, index_href, an_che_id)
        if ar_all_url_dict is None:
            self.log.error("抽取关键信息失败: province:{} host:{} company:{}".format(
                province, host, company))
            return CRAWL_DETAIL_FAIL

        # map section -> absolute url, honouring the annual_type variants
        ar_url_dict = self.__get_ar_url_link(host, annual_type, ar_all_url_dict)

        # sections that this annual_type is expected to contain
        ar_field_set = self.get_ar_field_set(annual_type)

        # message that carries everything to the store queue
        mq_model = MqModel()

        # page model accumulating the raw section pages
        web_page_model = {
            '_id': company,
            '_in_time': util.get_now_time(),
            'province': province,
            # reset the parse flag so the parser reprocesses this record
            self.parse_flag: 0,
        }

        # crawl each enabled section, honouring the per-section cache
        ar_success_count = self.process_annual_field(session, host, company,
                                                     ar_url_dict, ent_type, web_page_model, year,
                                                     ar_field_set, record_cache_dict)

        # derive overall status from how many sections succeeded
        status = self.get_crawl_status(ar_success_count, len(ar_field_set))

        # if every section failed, store nothing at all
        if status == CRAWL_DETAIL_FAIL:
            return status

        # store the crawled pages
        mq_model.append_detail_page(TableManage.get_detail_page_table(province), web_page_model)

        # record status and last-crawl time on the seed, even for partial
        # successes, so the scheduler can re-dispatch sensibly
        annual_model = {
            '_id': an_che_id,
            self.crawl_flag: status,
            self.crawl_flag_time: util.get_now_time(),
        }
        mq_model.append_seed(annual_table, annual_model)

        # persist the per-section crawl-time cache
        mq_model.append_annual_cache(RecordCache.get_annual_cache_key(province, company, year),
                                     RecordCache.annual_record_to_cache(record_cache_dict))

        # ship everything to the store queue in one message
        self.send_store_beanstalk(mq_model.get_store_model())
        return status

    @staticmethod
    def get_annual_field_key(year, inline_field):
        """Mongo key under which a section's pages are stored:
        "__<ANNUAL_INFO>#<year>#<section>"."""
        return "__{}#{}#{}".format(GsxtModel.ANNUAL_INFO, year, inline_field)

    def get_ar_field_set(self, annual_type):
        """Return the set of sections to crawl for this annual_type.

        Falls back to every known section when the type has no mapping.
        """
        full_field_dict = ANNUAL_TYPE_TO_LAB.get(annual_type)
        if full_field_dict is None:
            # BUG FIX: the original message had a {} placeholder but never
            # called .format(), so annual_type was never logged.
            self.log.error("当前annual_type = {} 没有对应的板块映射表...".format(annual_type))
            return set(self.AR_URL_EXTRACT_DICT.keys())

        # only the keys matter here — no need to iterate items
        return set(field for field in self.AR_URL_EXTRACT_DICT
                   if field in full_field_dict)

    def __crawl_section(self, field, with_ent, session, host, company,
                        url_dict, ent_type, page_model, year, ar_field_set):
        """Crawl one section if it is enabled, logging status and timing.

        :param field: section constant (AnnualModel.*)
        :param with_ent: True to append "?entType=<ent_type>" to the url
        :return: self.SUCCESS / self.FAIL
        """
        if field not in ar_field_set:
            return self.FAIL

        start_time = time.time()
        if with_ent:
            is_success = self.__request_get_annual_with_ent(session, host, url_dict, ent_type,
                                                            page_model, year, field)
        else:
            is_success = self.__request_get_annual(session, host, url_dict, ent_type,
                                                   page_model, year, field)

        self.log.info("属性抓取状态: company = {} field = {} is_success = {} cost time = {} s".format(
            company, field, is_success, time.time() - start_time))
        return is_success

    def get_base_info(self, session, host, company,
                      url_dict, ent_type, page_model, year, ar_field_set):
        """Crawl the basic-info section."""
        return self.__crawl_section(AnnualModel.BASE_INFO, False, session, host, company,
                                    url_dict, ent_type, page_model, year, ar_field_set)

    def get_annual_url_link(self, session, host, index_href, an_che_id):
        """Fetch the index JSON mapping section keys to endpoint urls.

        :return: dict of the index JSON, or None when the request failed
                 or the resource does not exist
        """
        session.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Host': host,
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': self.get_user_agent(),
        }
        url = 'http://{}{}{}.html'.format(host, index_href, an_che_id)
        resp, error = self.request_retry(session, host, session.post, url)
        if resp is None:
            return None

        if error == HttpError.NOT_FOUND:
            self.log.error("当前属性不存在: field = index_href url = {}".format(url))
            return None

        # copy the raw index dict into a plain dict
        return dict(resp.json())

    def __get_ar_url_link(self, host, annual_type, ar_all_url_dict):
        """Resolve each section's absolute url from the index dict,
        honouring annual_type-specific key variants.

        :return: dict mapping section constant -> absolute url
        """
        url_link_dict = {}
        for field, value in self.AR_URL_EXTRACT_DICT.iteritems():
            if isinstance(value, basestring):
                field_key = value
            else:
                # BUG FIX: the original left field_key unbound (NameError on
                # the first iteration) or stale from a previous iteration
                # when annual_type had no entry — skip such sections instead.
                if annual_type is None or annual_type not in value:
                    continue
                field_key = value[annual_type]

            if field_key in ar_all_url_dict:
                url_link_dict[field] = 'http://{}{}'.format(host, ar_all_url_dict[field_key])

        return url_link_dict

    def process_annual_field(self, session, host, parse_company,
                             url_dict, ent_type, page_model, year,
                             ar_field_set, record_cache_dict):
        """Crawl every enabled section, skipping sections whose cached
        crawl time has not expired yet.

        :return: number of sections counted as successful (crawled OK or
                 still fresh in the cache)
        """
        success_count = 0
        # dispatch table: section constant -> crawler method
        ar_func_dict = {
            # basic info (does not use base_href — refetched from the index)
            AnnualModel.BASE_INFO: self.get_base_info,
            # website info
            AnnualModel.WEBSITES_INFO: self.get_websites_info,
            # administrative licensing info
            AnnualModel.ADMINISTRATIVE_LICENSING_INFO: self.get_administrative_licensing_info,
            # shareholder contribution info
            AnnualModel.SHAREHOLDER_INFO: self.get_shareholder_info,
            # outbound investment info
            AnnualModel.INVESTED_COMPANIES_INFO: self.get_invested_companies_info,
            # branch offices
            AnnualModel.BRANCH_INFO: self.get_branch_info,
            # enterprise asset status
            AnnualModel.ENTERPRISE_ASSET_STATUS_INFO: self.get_enterprise_asset_status_info,
            # edit history
            AnnualModel.EDIT_CHANGE_INFO: self.get_edit_change_info,
            # equity changes
            AnnualModel.EDIT_SHAREHOLDING_CHANGE_INFO: self.get_edit_shareholding_change_info,
            # outbound guarantees
            AnnualModel.OUT_GUARANTEE_INFO: self.get_out_guarantee_info,
        }

        for field, func in ar_func_dict.iteritems():

            # section disabled for this annual_type — don't crawl
            if field not in ar_field_set:
                continue

            # crawl only when there is no cache entry or the entry expired
            # (replaces the original while-True/break pseudo-goto)
            cache_time = record_cache_dict.get(field)
            is_need_crawl = (cache_time is None or
                             RecordCache.is_gs_expired(field, cache_time))

            # fresh in cache: count as success without re-crawling
            if not is_need_crawl:
                success_count += 1
                continue

            is_success = func(session, host, parse_company, url_dict, ent_type, page_model, year, ar_field_set)
            success_count += is_success
            # remember the crawl time so the next run can skip this section
            if is_success == self.SUCCESS:
                record_cache_dict[field] = int(time.time())

        return success_count

    def __crawl_paged_annual(self, session, host, url, page_sep, page_model, year, field):
        """Crawl one section endpoint, following server-side pagination,
        and store the collected raw pages into page_model.

        This is the shared body of __request_get_annual and
        __request_get_annual_with_ent, which previously duplicated ~80
        lines differing only in the pagination separator.

        :param page_sep: separator before "start=N" — '&' when the url
                         already carries a query string, '?' otherwise
        :return: self.SUCCESS when every page was fetched, else self.FAIL
        """
        text_data_annual = []

        def __inline_request_get_list(page_url, text_list):
            # Fetch one page with up to MAX_TRY_TIMES attempts, switching
            # proxy on every failure. Returns (recordsTotal, page item
            # count), or (None, None) on 404 / exhausted retries.
            try_times = 0
            records_total = 0
            current = 0
            MAX_TRY_TIMES = 3
            while try_times < MAX_TRY_TIMES:

                resp, error = self.request_retry(session, host, session.get, page_url)
                if resp is None:
                    self.switch_proxy(session, host)
                    try_times += 1
                    continue

                if error == HttpError.NOT_FOUND:
                    return None, None

                json_data = util.json_loads(resp.text)
                if json_data is None:
                    self.switch_proxy(session, host)
                    try_times += 1
                    continue

                records_total = json_data.get('recordsTotal')
                if records_total is None:
                    self.switch_proxy(session, host)
                    try_times += 1
                    continue

                data = json_data.get('data')
                if not isinstance(data, list):
                    self.switch_proxy(session, host)
                    try_times += 1
                    continue

                current = len(data)
                text_list.append({'url': page_url, 'text': resp.text})
                break

            if try_times >= MAX_TRY_TIMES:
                return None, None

            return records_total, current

        # first page
        recordsTotal, currentNum = __inline_request_get_list(url, text_data_annual)
        if recordsTotal is None:
            return self.FAIL

        # follow-up pages until all records are fetched
        times = 1
        request_total = currentNum
        is_success = True
        while request_total < recordsTotal:
            start_url = url + page_sep + 'start={}'.format(request_total)
            recordsTotal, currentNum = __inline_request_get_list(start_url, text_data_annual)
            if recordsTotal is None:
                is_success = False
                break

            times += 1
            request_total += currentNum
            self.log.info(
                "请求翻页次数: field = {} times = {} request_total = {} recordsTotal = {} currentNum = {}".format(
                    field, times, request_total, recordsTotal, currentNum))

        # store the pages when fully successful, or partially with data
        if is_success or len(text_data_annual) > 0:
            page_model[self.get_annual_field_key(year, field)] = {
                STORE_TYPE_LIST: text_data_annual
            }

        if is_success:
            return self.SUCCESS

        return self.FAIL

    # GET variant that appends entType to the url. Example:
    # http://www.gsxt.gov.cn/corp-query-entprise-info-allWebInfo-...html?entType=1
    def __request_get_annual_with_ent(self, session, host, url_dict, ent_type, page_model, year, field):
        url = url_dict.get(field)
        if url is None:
            return self.FAIL

        # first page already carries "?entType=", so pages use '&'
        url = url + "?" + "entType={}".format(ent_type)
        return self.__crawl_paged_annual(session, host, url, '&', page_model, year, field)

    # plain GET variant (no entType parameter); pages use '?'
    def __request_get_annual(self, session, host, url_dict, ent_type, page_model, year, field):
        url = url_dict.get(field)
        if url is None:
            return self.FAIL

        return self.__crawl_paged_annual(session, host, url, '?', page_model, year, field)

    def get_websites_info(self, session, host, company,
                          url_dict, ent_type, page_model, year, ar_field_set):
        """Crawl the website / web-shop section (needs entType)."""
        return self.__crawl_section(AnnualModel.WEBSITES_INFO, True, session, host, company,
                                    url_dict, ent_type, page_model, year, ar_field_set)

    def get_administrative_licensing_info(self, session, host, company,
                                          url_dict, ent_type, page_model, year, ar_field_set):
        """Crawl the administrative licensing section (needs entType)."""
        return self.__crawl_section(AnnualModel.ADMINISTRATIVE_LICENSING_INFO, True, session, host, company,
                                    url_dict, ent_type, page_model, year, ar_field_set)

    def get_shareholder_info(self, session, host, company,
                             url_dict, ent_type, page_model, year, ar_field_set):
        """Crawl the shareholder contribution section."""
        return self.__crawl_section(AnnualModel.SHAREHOLDER_INFO, False, session, host, company,
                                    url_dict, ent_type, page_model, year, ar_field_set)

    def get_invested_companies_info(self, session, host, company,
                                    url_dict, ent_type, page_model, year, ar_field_set):
        """Crawl the outbound investment section."""
        return self.__crawl_section(AnnualModel.INVESTED_COMPANIES_INFO, False, session, host, company,
                                    url_dict, ent_type, page_model, year, ar_field_set)

    def get_branch_info(self, session, host, company,
                        url_dict, ent_type, page_model, year, ar_field_set):
        """Crawl the branch-office section."""
        return self.__crawl_section(AnnualModel.BRANCH_INFO, False, session, host, company,
                                    url_dict, ent_type, page_model, year, ar_field_set)

    def get_enterprise_asset_status_info(self, session, host, company,
                                         url_dict, ent_type, page_model, year, ar_field_set):
        """Crawl the enterprise asset status section."""
        return self.__crawl_section(AnnualModel.ENTERPRISE_ASSET_STATUS_INFO, False, session, host, company,
                                    url_dict, ent_type, page_model, year, ar_field_set)

    def get_edit_change_info(self, session, host, company,
                             url_dict, ent_type, page_model, year, ar_field_set):
        """Crawl the annual-report edit-history section."""
        return self.__crawl_section(AnnualModel.EDIT_CHANGE_INFO, False, session, host, company,
                                    url_dict, ent_type, page_model, year, ar_field_set)

    def get_edit_shareholding_change_info(self, session, host, company,
                                          url_dict, ent_type, page_model, year, ar_field_set):
        """Crawl the equity-change section."""
        return self.__crawl_section(AnnualModel.EDIT_SHAREHOLDING_CHANGE_INFO, False, session, host, company,
                                    url_dict, ent_type, page_model, year, ar_field_set)

    def get_out_guarantee_info(self, session, host, company,
                               url_dict, ent_type, page_model, year, ar_field_set):
        """Crawl the outbound-guarantee section."""
        return self.__crawl_section(AnnualModel.OUT_GUARANTEE_INFO, False, session, host, company,
                                    url_dict, ent_type, page_model, year, ar_field_set)
