#!/usr/bin/env python 
# coding:utf-8
# @Time :9/29/18 09:58

import copy
import json
import sys

import click

# Path hack: this script is meant to be run directly from a nested task
# directory, so append up to three ancestor directories to sys.path to make
# the project packages (proxy, ext, mq, logger) importable.
sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from proxy.proxy_type import ProxyType

from ext.task_base import TaskBase
from logger import AppLogger
from mq.mq_factory import MQFactory

from lxml import html

logger = AppLogger('jinan_new_register.log').get_logger()


class JiNanNewRegister(TaskBase):
    """Crawler for newly registered companies on www.jncredit.gov.cn.

    Walks the paginated credit-search listing, extracts each company's
    name and registration code, and pushes one clue record per company
    onto the gsxt clue beanstalk queue (via ``push_gsxt_clue_data`` from
    the TaskBase framework).
    """

    # Listing URL template; ``{}`` is the 1-based page number.
    __START_URL = "http://www.jncredit.gov.cn/creditsearch.listcredit.dhtml?orgtype=1&page={}"

    # 新gsxt的线索队列 — template for one clue record pushed to the queue.
    # Deep-copied per company so this class-level dict is never mutated.
    __GSXT_DATA = {
        "province": u"山东",
        "company": None,
        "source_url": u"www.jncredit.gov.cn",
        "register_code": None,
    }

    def __init__(self, search_page, log):
        """
        :param search_page: number of pages to crawl; 0 means crawl every
                            page reported by the site's pagination widget.
        :param log: logger instance shared with the task framework.
        """
        super(JiNanNewRegister, self).__init__(log)
        self._reset_beanstalk_handler(MQFactory.get_gsxt_clue_beanstalk_handler(log))
        self.__search_page = search_page
        self.log.info("爬取 {} 页的数据...".format(search_page if search_page != 0 else '所有'))
        self.__set_headers()

    def __set_headers(self):
        """Install browser-like request headers on the shared proxy session."""
        headers = {
            "Host": "www.jncredit.gov.cn",
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6",
        }
        self._proxy_requests.set_header(headers)

    def __get_total_page(self, html_resp):
        """Return the number of pages to crawl, or -1 on parse failure.

        If ``search_page`` was given (non-zero) it takes precedence over the
        page count scraped from the pagination widget.

        :param html_resp: lxml document of a listing page.
        """
        try:
            # The last pagination <li> carries two text nodes; the second one
            # looks like "/N共...", so strip the '/' and cut at u'共'.
            page_num = html_resp.xpath('//ul[@class="pagination"]/li[last()]/a/text()')[1]
            page_num = int(page_num.strip().split(u'共')[0].replace('/', ''))
            return self.__search_page or page_num
        except Exception as e:
            self.log.error("__get_total_page 获取页码数异常: ")
            self.log.exception(e)
        return -1

    def __get_page_data(self, url):
        """Fetch one listing page, push a clue per company, return total pages.

        :param url: fully formatted listing-page URL.
        :returns: total page count (from ``__get_total_page``) on success,
                  or -1 when the request fails or the page yields no data.
        """
        resp = self._proxy_requests.get(url, proxy_type=ProxyType.KUNPENG_DYNAMIC)

        if resp is None:
            self.log.warn("__get_page_data 未能正常请求页面...")
            return -1

        html_resp = html.fromstring(resp.text)

        company_extract = html_resp.xpath('//div[@class="col-xs-9"]/table//tr')

        if not company_extract:
            self.log.warn("__get_page_data 页面未提取到 company_extract 数据，当前url={}".format(url))
            return -1

        # The first <tr> is the table header; at least one data row is needed.
        if len(company_extract) <= 1:
            self.log.warn("__get_page_data 未能拿到数据...请校验页面信息，当前url={}".format(url))
            return -1

        for per_company_extract in company_extract[1:]:
            # Expected cells: [company name, registration code, <third column>].
            company_info = per_company_extract.xpath('./td/text()')
            if not isinstance(company_info, list) or not len(company_info) == 3:
                self.log.warn("__get_page_data 提取 company_info 信息存在错误，请校验...company_info={}".format(company_info))
                continue
            company_name = company_info[0]
            if company_name:
                self.log.info("__get_page_data 当前采集到的新注册企业为={}".format(company_name))
            else:
                # An empty name signals a layout change — abort this page.
                self.log.warn("__get_page_data 当前采集到新注册企业名有问题，请校验...company_info={}".format(company_info))
                return -1

            register_code = company_info[1]
            gsxt_data = copy.deepcopy(self.__GSXT_DATA)
            gsxt_data["company"] = company_name
            gsxt_data["register_code"] = register_code
            self.push_gsxt_clue_data(gsxt_data)

        total_page = self.__get_total_page(html_resp)
        return total_page

    def start(self, *args, **kwargs):
        """Crawl page 1 to learn the page count, then pages 2..total_page."""
        total_page = self.__get_page_data(self.__START_URL.format(1))
        if total_page <= 0:
            self.log.warn("start 获取页码数信息异常，不进行遍历: total_page = {}".format(total_page))
            return

        for page in xrange(2, total_page + 1):
            try:
                self.log.info("start 当前采集页面: page = {}".format(page))
                result = self.__get_page_data(self.__START_URL.format(page))
                # BUG FIX: the original tested ``result == -2``, a sentinel
                # __get_page_data never returns, so the branch was dead and a
                # failed page was logged as complete.  -1 is the real error
                # sentinel; log the failure and move on to the next page.
                if result == -1:
                    self.log.warn("start 当前页面采集失败: page = {}".format(page))
                    continue
                self.log.info("start 当前页面采集完成: page = {}".format(page))
            except Exception as e:
                self.log.error("start 当前页面采集失败: page = {}".format(page))
                self.log.exception(e)

        self.log.info("成功退出采集程序...")


@click.command()
@click.option('--search_page',
              default=50,
              type=int,
              help='采集页数, 0代表采集所有页数')
def main(search_page):
    """CLI entry point: build the crawler task and run it once.

    :param search_page: page count passed through to JiNanNewRegister;
                        0 means crawl every available page.
    """
    try:
        # TaskBase instances are callable; constructing and calling runs the task.
        task = JiNanNewRegister(search_page, logger)
        task()
    except Exception as e:
        logger.error("采集异常退出: ")
        logger.exception(e)


# Script entry point: delegate to the click-decorated command.
if __name__ == '__main__':
    main()
