#!/usr/bin/env python 
# coding:utf-8
# @Time :10/16/18 10:06

import copy
import re
import sys

import click
from pyquery import PyQuery
from lxml import html

sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from proxy.proxy_type import ProxyType

from common.date_util import DateUtil
from ext.task_base import TaskBase
from logger import AppLogger
from mq.mq_factory import MQFactory

# Module-level logger; AppLogger routes records to chinanpo_gsxt.log.
logger = AppLogger('chinanpo_gsxt.log').get_logger()


class ChinanpoGsxt(TaskBase):
    __START_URL = "http://www.chinanpo.gov.cn/search/orgcx.html"
    __MIN_ZHENG_DETAIL_URL = "http://www.chinanpo.gov.cn/search/vieworg.html"
    __DI_FANG_DETAIL_URL = "http://www.chinanpo.gov.cn/search/poporg.html"

    # 新gsxt的线索队列
    __GSXT_DATA = {
        "province": u"山东",
        "company": None,
        "source_url": u"credit.qingdao.gov.cn",
        "register_code": None,
    }

    # params
    # __POST_PARAMS = {
    #     'status': '5',
    #     'regNum': '-1',
    #     'tabIndex': '1'
    # }

    __POST_PARAMS = {
        'status': '5',
        'current_page': '0',
        'page_size': '20',
        'regNum': '-1',
        'page_flag': 'true',
        'pagesize_key': 'macList',
        'tabIndex': '1'
    }

    def __init__(self, before_date, log):
        super(ChinanpoGsxt, self).__init__(log)
        self._reset_beanstalk_handler(MQFactory.get_gsxt_clue_beanstalk_handler(log))
        self.__before_date = DateUtil.get_before_day(before_date)
        self.log.info("获得 {} 之后的数据...".format(self.__before_date))
        self.__set_headers()

    def __set_headers(self):
        headers = {
            "Host": "www.chinanpo.gov.cn",
            "Origin": "http://www.chinanpo.gov.cn",
            "Referer": "http://www.chinanpo.gov.cn/search/orgcx.html",
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "DNT": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6",
        }
        self._proxy_requests.set_header(headers)

    def __get_total_page(self, text):
        try:
            page_num = re.findall(u"当前第\d+/(\d+)页", text)[0]
            return int(page_num)
        except Exception as e:
            self.log.error("获取页码数异常: ")
            self.log.exception(e)
        return -1

    def __get_page_data(self, per_cate, page_count=0):
        if per_cate == "MIN_ZHENG":
            post_data = copy.deepcopy(self.__POST_PARAMS)
            if page_count > 0:
                post_data["goto_page"] = "next"
                post_data["total_count"] = "10000"
            post_data["current_page"] = page_count
            # print post_data
            resp = self._proxy_requests.post(self.__START_URL,
                                             data=post_data,
                                             proxy_type=ProxyType.KUNPENG_DYNAMIC)

            # print resp.text

            if resp is None:
                return -1

            agency_ids = set(re.findall("javascript:toHref\((\d+)\);", resp.text))

            for per_agency_id in agency_ids:
                self.__get_min_zheng_detail(per_agency_id)
                # break

        # if per_cate == "MIN_ZHENG":
        #     pass

        else:
            # pass
            post_data = copy.deepcopy(self.__POST_PARAMS)
            post_data["tabIndex"] = '2'
            resp = self._proxy_requests.post(self.__START_URL,
                                             data=post_data,
                                             proxy_type=ProxyType.KUNPENG_DYNAMIC)
            if resp is None:
                return -1

            agency_infos = set(re.findall("javascript:popOrgWin\(\"(\d+)\",\"(.*?)\"\)", resp.text))

            for per_agency_info in agency_infos:
                self.__get_di_fang_detail(per_agency_info)
                # break

        total_page = self.__get_total_page(resp.text)
        return total_page

    def __get_min_zheng_detail(self, agency_id):
        resp = self._proxy_requests.post(self.__MIN_ZHENG_DETAIL_URL,
                                         data={"orgId": agency_id},
                                         proxy_type=ProxyType.KUNPENG_DYNAMIC)

        if not resp:
            self.log.error("当前页面爬取异常... agency={}".format(agency_id))
            return
        content = self.__parse_detail(resp.text)

    def __get_di_fang_detail(self, agency_info):
        resp = self._proxy_requests.get(self.__DI_FANG_DETAIL_URL,
                                         params={"i": agency_info[0],
                                                 "u": agency_info[1]},
                                        proxy_type=ProxyType.KUNPENG_DYNAMIC)
        if not resp:
            self.log.error("当前页面爬取异常... agency={}".format(agency_id))
            return

        content = self.__parse_detail(resp.text)

    def __parse_detail(self, content):
        resp = html.fromstring(content)
        title = "".join(resp.xpath("//div[@class='title_bg']/h3/text()"))
        print "title>>>>>", title
        name = title.split("统一社会信用代码：")[0]
        registered_code = title.split("统一社会信用代码：")[1]
        originaze = resp.xpath(u'//td[contains(text(), "登记管理机关")]/following-sibling::td[1]/text()')[0].strip()
        competent_org = resp.xpath(u'//td[contains(text(), "业务主管单位")]/following-sibling::td[1]/text()')[0].strip()
        legal_man = resp.xpath(u'//td[contains(text(), "法定代表人")]/following-sibling::td[1]/text()')[0].strip()
        hezhun_date = resp.xpath(u'//td[contains(text(), "成立登记日期")]/following-sibling::td[1]/text()')[0].strip()
        src_registered_capital = resp.xpath(u'//td[contains(text(), "注册资金")]/following-sibling::td[1]/text()')[0].strip()
        website = resp.xpath(u'//td[contains(text(), "网")]/following-sibling::td[1]/text()')[0].strip()
        phone = resp.xpath(u'//td[contains(text(), "联系电话")]/following-sibling::td[1]/text()')[0].strip()
        registration_code = resp.xpath(u'//td[contains(text(), "登记证号")]/following-sibling::td[1]/text()')[0].strip()
        scoial_originaze_type = resp.xpath(u'//td[contains(text(), "社会组织类型")]/following-sibling::td[1]/text()')[0].strip()
        address = resp.xpath(u'//td[contains(text(), "住")]/following-sibling::td[1]/text()')[0].strip()

        # print title, name, registered_code, originaze, competent_org, legal_man, hezhun_date, src_registered_capital, website, phone, scoial_originaze_type, address
        print "registration_code>>>>", registration_code
        # item = {}

    def __grab_category(self, per_cate):
        total_page = self.__get_page_data(per_cate)
        if total_page <= 0:
            self.log.warn("获取页码数信息异常，不进行遍历: total_page = {}".format(total_page))
            return

        for page in xrange(1, total_page):
            try:
                self.log.info("当前采集页面: page = {}".format(page))
                result = self.__get_page_data(per_cate, page)
                if result == -2:
                    self.log.info("采到时间截止，停止采集..")
                    break
                self.log.info("当前页面采集完成: page = {}".format(page))
                # todo 这里debug用
                # break
            except Exception as e:
                self.log.error("当前页面采集失败: page = {}".format(page))
                self.log.exception(e)

        self.log.info("成功退出采集程序...")

    def start(self, *args, **kwargs):
        # categories = ["MIN_ZHENG", "DI_FANG"]
        categories = ["MIN_ZHENG"]
        for per_cate in categories:
            self.__grab_category(per_cate)

# todo 修改支持指定日期采集
@click.command()
@click.option('--before_date',
              default=7,
              type=int,
              help='采集截止时间')
def main(before_date):
    """CLI entry point: build the scraper task and run it once.

    Any exception escaping the task is logged rather than propagated so the
    process exits cleanly.
    """
    try:
        task = ChinanpoGsxt(before_date, logger)
        task()
    except Exception as e:
        logger.error("采集异常退出: ")
        logger.exception(e)


# Script entry point; click parses --before_date from the command line.
if __name__ == '__main__':
    main()
