#!/usr/bin/env python 
# coding:utf-8
# @Time :9/27/18 16:23

import copy
import json
import sys

import click
from pyquery import PyQuery

sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from proxy.proxy_type import ProxyType

from common.date_util import DateUtil
from ext.task_base import TaskBase
from logger import AppLogger
from mq.mq_factory import MQFactory

# Module-level logger; AppLogger is a project wrapper that writes to the named file.
logger = AppLogger('zibo_new_register.log').get_logger()


class ZiboNewRegister(TaskBase):
    """Crawl newly registered companies from www.xinyongzibo.gov.cn.

    The site serves a paginated JSON endpoint (10 rows per page). Each
    company found is wrapped in a gsxt clue message and pushed onto the
    beanstalk queue obtained from MQFactory. Crawling stops early once a
    record older than the configured cut-off date is encountered
    (the listing is assumed date-descending — the early-return in
    __get_page_data relies on that; confirm against the live endpoint).
    """

    # JSON endpoint serving the paginated license/registration data.
    __START_URL = "http://www.xinyongzibo.gov.cn/out/data.json"

    # Template for clue messages pushed to the new gsxt queue;
    # "company" is filled per record ("register_code" is never populated here).
    __GSXT_DATA = {
        "province": u"山东",
        "company": None,
        "source_url": u"www.xinyongzibo.gov.cn",
        "register_code": None,
    }

    # Template POST form; "page", "xzxk" and "xzcf" are overwritten per request.
    __FORMAT_DEMO = {
        "dispatch": "xzxkxxList",
        "page": "1",
        "rows": "10",
        "xzxk": "市场",
        "xzcf": "市场",
    }

    def __init__(self, search_word, before_date, log):
        """
        :param search_word: keyword written into both "xzxk" and "xzcf" form fields
        :param before_date: look-back window in days; records dated before
                            (today - before_date) stop the crawl
        :param log: logger shared with TaskBase
        """
        super(ZiboNewRegister, self).__init__(log)
        self._reset_beanstalk_handler(MQFactory.get_gsxt_clue_beanstalk_handler(log))
        self.__search_word = search_word
        # Cut-off date string, e.g. "2018-09-17"; compared lexically below,
        # which is valid for zero-padded YYYY-MM-DD dates.
        self.__before_date = DateUtil.get_before_day(before_date)
        self.log.info("获得 {} 之后的数据...".format(self.__before_date))
        self.__set_headers()

    def __set_headers(self):
        """Install browser-like request headers on the proxy session."""
        headers = {
            "Host": "www.xinyongzibo.gov.cn",
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "DNT": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6",
        }
        self._proxy_requests.set_header(headers)

    def __get_total_page(self, json_result):
        """Derive the page count from the response's "total" field.

        :param json_result: parsed JSON dict from the endpoint
        :return: page count (>= 1) at 10 rows per page, or -1 when the
                 "total" field is missing or zero
        """
        total = json_result.get('total')

        if not total:
            self.log.error("__get_page_data 无法获得总页面数...")
            return -1

        total_int = int(total)

        # Ceiling division. The previous `total // 10 + 1` over-counted one
        # page whenever total was an exact multiple of 10 (e.g. 20 -> 3).
        return (total_int + 9) // 10

    def __get_page_data(self, search_word, page):
        """Fetch one result page and push each company as a gsxt clue.

        :param search_word: keyword for the "xzxk"/"xzcf" form fields
        :param page: 1-based page number (int or str; sent as-is in the form)
        :return: total page count (>= 1) on success,
                 -1 on request/parse failure,
                 -2 once a record older than the cut-off date is seen
        """
        form_data = copy.deepcopy(self.__FORMAT_DEMO)
        form_data["xzxk"] = form_data["xzcf"] = search_word
        form_data['page'] = page
        resp = self._proxy_requests.post(self.__START_URL, proxy_type=ProxyType.KUNPENG_DYNAMIC,
                                         data=form_data)

        if resp is None:
            self.log.error("__get_page_data 请求数据时未正确拿到数据，此时page为={} 检索词为={}".format(page, search_word))
            return -1

        json_result = json.loads(resp.content)

        company_list = json_result.get("rows")

        if not company_list:
            self.log.error("__get_page_data 未获得企业名单数据")
            return -1

        if not isinstance(company_list, list):
            self.log.error("__get_page_data 获得企业名单数据格式错误")
            return -1

        for per_company_info in company_list:
            company_name = per_company_info.get("xk_xdr")
            if not company_name:
                self.log.warn("__get_page_data 未能正确获得注册公司信息")
                continue

            # Guard: "sjc" (record timestamp) may be absent; the previous
            # code crashed with AttributeError on None.replace().
            raw_date = per_company_info.get("sjc")
            if not raw_date:
                self.log.warn("__get_page_data 未能正确获得注册公司信息")
                continue

            # Normalize "2018/09/27" -> "2018-09-27" so the lexical
            # comparison against the cut-off date works.
            date = raw_date.replace('/', '-')
            if date < self.__before_date:
                return -2

            gsxt_data = copy.deepcopy(self.__GSXT_DATA)
            gsxt_data["company"] = company_name
            self.log.info("当前采集的结果为： {}".format(json.dumps(gsxt_data, ensure_ascii=False)))
            self.push_gsxt_clue_data(gsxt_data)

        total_page = self.__get_total_page(json_result)

        return total_page

    def start(self, *args, **kwargs):
        """Crawl page 1 to learn the page count, then walk pages 2..N.

        Stops early when a page returns -2 (cut-off date reached). Errors on
        individual pages are logged and the crawl continues.
        """
        total_page = self.__get_page_data(self.__search_word, page='1')
        if total_page <= 0:
            # Covers both -1 (failure) and -2 (page 1 already past cut-off).
            self.log.warn("获取页码数信息异常，不进行遍历: total_page = {}".format(total_page))
            return

        if total_page == 1:
            self.log.info("当前页码只存在第一页，不进行遍历...")

        for page in range(2, total_page + 1):
            try:
                self.log.info("当前正在采集第 {} 页".format(page))
                result = self.__get_page_data(self.__search_word, page)
                if result == -2:
                    self.log.info("采集到截止日期，停止采集...")
                    break
                self.log.info("当前页面采集完成，当前是第 {} 页".format(page))
            except Exception as e:
                # Narrowed from BaseException: that also swallowed
                # KeyboardInterrupt/SystemExit, making the crawler unkillable.
                self.log.error("当前采集信息遇到异常, 当前是第 {} 页".format(page))
                self.log.exception(e)

        self.log.info("成功退出采集程序...")


@click.command()
@click.option('--search_word',
              default='市场主体设立',
              type=str,
              help='检索词')
@click.option('--before_date',
              default=10,
              type=int,
              help='采集截止日期')
def main(search_word, before_date):
    """CLI entry point: build the crawler and invoke it, logging any failure."""
    try:
        crawler = ZiboNewRegister(search_word, before_date, logger)
        crawler()
    except Exception as err:
        logger.error("采集异常退出: ")
        logger.exception(err)


if __name__ == '__main__':
    main()
