#!/usr/bin/env python
# encoding: utf-8
"""
@author: youfeng
@email: youfeng243@163.com
@license: Apache Licence
@file: search_page_crawler.py
@time: 2017/12/27 21:42
@列表页抓取功能
"""
import json
import re

import requests
from pyquery import PyQuery

from common import util
from config.app_conf import SEARCH_LIST_FAIL, ENCRY_ZJ_URL, SEARCH_LIST_NOT_FOUND, SEARCH_LIST_SUCCESS
from ext.crawler_handler import CrawlerHandler


# Search (list) page crawler.
class SearchPageCrawler(CrawlerHandler):
    """Crawls the company search (list) pages of gsxt.zjaic.gov.cn.

    For a given company name it encrypts the search keyword via a remote
    service, fetches every result page, parses the company entries, and
    stores new seeds, parsed parameters and the raw pages through the
    CrawlerHandler storage API.
    """

    # Pre-compiled pagination-link pattern (raw string so \? and \d are
    # real regex escapes); the last match is the highest page number.
    _PAGE_PATTERN = re.compile(
        r'list\?isOpanomaly=.*?&pubType=.*?&searchKeyWord=.*?&currentPage=(\d+)')

    def __init__(self, store_tube, seed_table, param_table, search_page_table, search_flag, log):
        super(SearchPageCrawler, self).__init__(store_tube, log)

        # Crawl-state flag names written back onto seed records.
        self.search_flag = search_flag
        self.search_flag_time = self.search_flag + '_time'

        # Storage tables:
        # seed table (company names to crawl / crawl status)
        self.seed_table = seed_table

        # parsed list-item parameter table
        self.param_table = param_table

        # raw search-result page table
        self.search_page_table = search_page_table

        # Target host.
        self.host = 'gsxt.zjaic.gov.cn'

    # Call the encryption service to obtain the encrypted pripid/keyword.
    def get_encry_pripid(self, encry_url, script):
        """POST `script` to the encryption service.

        :param encry_url: encryption service endpoint
        :param script: JS expression the service should evaluate
        :return: raw response text on HTTP 200, otherwise None
        """
        session = requests.session()
        session.headers['Content-Type'] = 'application/json'

        post_data = {"script": script}

        try:
            r = session.post(encry_url, json=post_data, timeout=10)
            if r.status_code == 200:
                return r.text
        except Exception as e:
            self.log.exception(e)

        return None

    @staticmethod
    def json_loads(text):
        """json.loads that returns None instead of raising on bad input."""
        try:
            return json.loads(text)
        except Exception:
            return None

    # Decode the encryption service response.
    def get_encry_pripid_detail(self, encry_url, script):
        """Return the 'result' field of the encryption service response,
        or None when the request fails or the response reports an error.
        """
        encry_href = self.get_encry_pripid(encry_url, script)
        if encry_href is None:
            return None

        json_data = self.json_loads(encry_href)
        if json_data is None:
            return None

        # Success is signalled by an explicit null 'error' field; a missing
        # key (default 'fail') or any non-null value means failure.  This
        # single check is equivalent to the original pair of checks
        # (error == 'fail' / error is not None).
        if json_data.get('error', 'fail') is not None:
            return None

        # None when the 'result' key is absent, matching the old behavior.
        return json_data.get('result', None)

    # Fetch one list page.
    def request_list_page(self, session, url):
        """GET one list page through task_request; return its text or None."""
        session.headers = {
            'Host': self.host,
            'User-Agent': self.get_user_agent(),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        }

        resp = self.task_request(session, self.host, session.get, url=url)
        if resp is None:
            return None

        return resp.text

    # Fetch the first result page and determine the total page count.
    def get_first_list_page(self, session, search_key_word):
        """Fetch page 1 of the search results.

        :return: (total_page, status, text) — status is one of the
                 SEARCH_LIST_* constants, text the raw page (may be None).
        """
        total_page, status, text = 0, SEARCH_LIST_FAIL, None

        url = 'http://{}/client/entsearch/list?isOpanomaly=&pubType=1&searchKeyWord={}'.format(
            self.host, search_key_word)

        text = self.request_list_page(session, url=url)
        if text is None:
            return total_page, status, text

        try:
            # Read the hit count first.
            jq = PyQuery(text, parser='html')
            count = jq.find(".mod.enterprise-info").find('.title').find('.light').text()
            if count is None or count == '':
                return total_page, SEARCH_LIST_NOT_FOUND, text

            if int(count) <= 0:
                return total_page, SEARCH_LIST_NOT_FOUND, text

            # Extract the highest page number from the pager links.
            search_list = self._PAGE_PATTERN.findall(text)
            if not search_list:
                # No pager rendered: a single page of results.
                return 0, SEARCH_LIST_SUCCESS, text

            return int(search_list[-1]), SEARCH_LIST_SUCCESS, text
        except Exception as e:
            self.log.error("解析数据数目失败: ")
            self.log.exception(e)
            status = SEARCH_LIST_FAIL

        return total_page, status, text

    def get_search_key_word(self, company):
        """Encrypt the company name via the remote strEnc service."""
        script = "strEnc('{}','a','b','c')".format(company)
        return self.get_encry_pripid_detail(ENCRY_ZJ_URL, script)

    def parse_html_text(self, company, text):
        """Parse one list page into a list of per-company param dicts.

        Each dict may contain: _id (company name), status, register_code,
        register_date, history_name.

        :param company: original search keyword (used only for error logging)
        :param text: raw HTML of a list page
        :return: list of parsed param dicts (empty on parse failure)
        """
        param_list = []

        try:
            jq = PyQuery(text, parser='html')
            li_list = jq.find('.enterprise-info-list').find('li').items()
            for li_item in li_list:

                a_info = li_item.find('a')
                if a_info is None or len(a_info) <= 0:
                    continue

                param = {}

                # Company registration status (the <i> badge inside the link).
                status = a_info.find('i').text()
                if status is not None and status.strip() != '':
                    param['status'] = status.replace(' ', '')

                # Strip decorations so a_info.text() yields the bare name.
                a_info.find('span[class=tip]').remove()
                a_info.find('i').remove()
                # BUGFIX: the original assigned this to `company`, clobbering
                # the search keyword used in the exception log below.
                search_name = a_info.text().replace(' ', '')
                if search_name == '':
                    continue

                # Company name is the primary key.
                param['_id'] = search_name

                # Registration number ("label:NNNN" inside .code).
                register_code = None
                code_text = li_item.find('.item-text').find('.code').text()
                if code_text is not None and code_text.strip() != '':
                    part = code_text.split(':')
                    if len(part) >= 2:
                        register_code = part[1]

                if register_code is not None and register_code.strip() != '':
                    register_code = register_code.replace(' ', '').replace('\r', '').replace('\t', '').replace('\n', '')
                    param['register_code'] = register_code

                # Registration date (last span of the first .item-text).
                register_date_str = li_item.find('.item-text').eq(0).find('span').eq(-1).text()
                if register_date_str is not None and register_date_str.strip() != '':
                    param['register_date'] = register_date_str.strip().split('：')[1]

                # Historical names (second .item-text block, when present).
                item_text_list = li_item.find('.item-text')
                if item_text_list.length >= 2:
                    item_text_str = item_text_list.eq(-1).text()
                    if item_text_str is not None and item_text_str.strip() != '':
                        item_text_str = item_text_str.replace(' ', '')
                        param['history_name'] = item_text_str.split('：', 1)[1]

                param_list.append(param)

        except Exception as e:
            self.log.error("解析失败: company = {}".format(company))
            self.log.exception(e)

        return param_list

    # Page through the remaining result pages.
    def request_turn_page(self, session, company, search_key_word, param_list, page_list, total_page):
        """Fetch pages 1..total_page, appending parsed params to
        `param_list` and raw page texts to `page_list` (both mutated).

        NOTE(review): page 1 was already fetched (without currentPage) by
        get_first_list_page; fetching currentPage=1 here may duplicate its
        entries — kept as-is to preserve existing behavior.
        """
        # range (not py2-only xrange) works identically on py2 and py3 here.
        for page in range(1, total_page + 1):
            url = 'http://{}/client/entsearch/list?isOpanomaly=&pubType=1&searchKeyWord={}&currentPage={}'.format(
                self.host, search_key_word, page)
            text = self.request_list_page(session, url=url)
            if text is None:
                continue

            param_list.extend(self.parse_html_text(company, text))
            page_list.append(text)

    # Persist the parsed results.
    def store_parse_result(self, company, param_list, page_list):
        """Persist parsed params, newly discovered seeds and raw pages.

        :return: SEARCH_LIST_SUCCESS when an exact match for `company`
                 was found among the parsed entries, else SEARCH_LIST_NOT_FOUND
        """
        status = SEARCH_LIST_NOT_FOUND

        seed_list = []
        for param in param_list:
            _id = param.get('_id')

            # An exact match means the searched company itself was found;
            # do not re-seed it.
            if self.compare_company(_id, company) == 0:
                status = SEARCH_LIST_SUCCESS
                continue

            seed_list.append({
                '_id': _id,
                '_in_time': util.get_now_time(),
            })

        # Newly discovered companies become additional seeds.
        self.save_seed_batch(self.seed_table, seed_list)

        # Parsed per-company parameters.
        self.save_seed_batch(self.param_table, param_list)

        # Raw pages, keyed by the searched company name.
        if len(page_list) > 0:
            self.save_list_page_info(self.search_page_table, {
                '_id': company,
                'text': page_list,
                '_in_time': util.get_now_time(),
            })

        return status

    def crawl_list_page(self, session, company):
        """Run the full list-page crawl for one company and record status."""
        search_key_word = self.get_search_key_word(company)
        if search_key_word is None:
            return SEARCH_LIST_FAIL

        # First page: total page count, crawl status, raw text.
        total_page, status, text = self.get_first_list_page(session, search_key_word)
        # Nothing found: tag the seed with the NOT_FOUND status and stop.
        if status == SEARCH_LIST_NOT_FOUND:
            self.save_seed_info(self.seed_table, {
                '_id': company,
                self.search_flag: status,
                self.search_flag_time: util.get_now_time(),
            })
            return status

        # Fetch failed: store nothing so the seed can be retried.
        if status == SEARCH_LIST_FAIL:
            return status

        # Accumulate the raw text of every list page.
        page_list = [text]

        # Parse the first page.
        param_list = self.parse_html_text(company, text)

        # Crawl the remaining pages.
        self.request_turn_page(session, company, search_key_word, param_list, page_list, total_page)

        # Persist everything that was collected.
        status = self.store_parse_result(company, param_list, page_list)

        # Record the crawl status on the seed.
        self.save_seed_info(self.seed_table, {
            '_id': company,
            self.search_flag: status,
            self.search_flag_time: util.get_now_time(),
        })
        return status

    # Crawl entry point.
    def crawl_process(self, company):
        """Crawl entry point.

        :param company: company name to search for
        :return: (status, company) tuple
        """
        self.log.info("开始抓取企业: company = {}".format(company))
        session = self.get_session(self.host)
        try:
            status = self.crawl_list_page(session, company)
        except Exception as e:
            status = SEARCH_LIST_FAIL
            self.log.error("请求列表页错误: company = {}".format(company))
            self.log.exception(e)

        self.log.info("抓取状态: company = {} status = {}".format(company, status))
        return status, company