#!/usr/bin/env python 
# coding:utf-8
# @Time :10/18/18 16:45

import copy
import re
import sys
import time

import click
from pyquery import PyQuery
from lxml import html

sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from proxy.proxy_type import ProxyType

from common.date_util import DateUtil
from ext.task_base import TaskBase
from logger import AppLogger
from mq.mq_factory import MQFactory
from base.parse_base_worker import ParseBaseWorker

# Module-level logger; AppLogger writes to the named log file.
logger = AppLogger('chinanpo_gsxt_difang.log').get_logger()


class ChinanpoGsxtDifang(TaskBase):
    """Crawler for locally-registered (地方) social organizations on
    chinanpo.gov.cn.

    Flow: POST the search form one page at a time (``__get_page_data``),
    pull the ``(id, url-token)`` pairs embedded in ``popOrgWin(...)``
    javascript calls out of the listing HTML, fetch each detail page
    (``__get_detail``), parse it (``__parse_detail``) and hand the record
    to ``ParseBaseWorker`` for storage.
    """

    # Listing (search) endpoint and per-organization detail endpoint.
    __START_URL = "http://www.chinanpo.gov.cn/search/orgcx.html"
    __DI_FANG_DETAIL_URL = "http://www.chinanpo.gov.cn/search/poporg.html"

    # Base form payload for the listing POST; regDate / regDateEnd are
    # filled in per request by __get_page_data.
    __POST_PARAMS = {
        'status': '-1',
        'current_page': '0',
        'page_size': '20',
        'regNum': '-1',
        'page_flag': 'true',
        'pagesize_key': 'macList',
        'tabIndex': '2',
        'regDate': None,
        'regDateEnd': None,
    }

    # Map the site's registration-status wording onto our canonical terms;
    # any other status is stored as None (see __parse_detail).
    __BUSINESS_STATUS_MAP = {
        u"正常": u"存续",
        u"撤销": u"注销",
    }

    def __init__(self, before_date, log):
        """
        :param before_date: number of days to look back; only organizations
            registered in [today - before_date, today] are collected.
        :param log: application logger instance.
        """
        super(ChinanpoGsxtDifang, self).__init__(log)
        self._reset_beanstalk_handler(MQFactory.get_gsxt_clue_beanstalk_handler(log))
        self.__before_date = DateUtil.get_before_day(before_date)
        self.__now_date = DateUtil.get_before_day(0)
        self.log.info("获得 {} 之后的数据...".format(self.__before_date))
        self.__set_headers()

        self.host = "www.chinanpo.gov.cn"

        self.parse_base_worker = ParseBaseWorker(host=self.host,
                                                 is_gs_mq_open='True')

    def __set_headers(self):
        """Install browser-like default headers on the proxy session."""
        headers = {
            "Host": "www.chinanpo.gov.cn",
            "Origin": "http://www.chinanpo.gov.cn",
            "Referer": "http://www.chinanpo.gov.cn/search/orgcx.html",
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "DNT": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6",
        }
        self._proxy_requests.set_header(headers)

    def __get_total_page(self, text):
        """Extract the total page count from the pager text ("当前第x/y页").

        :param text: listing-page HTML.
        :returns: total page count, or -1 when the pattern is absent or
            unparsable.
        """
        try:
            # Doubled backslashes: don't rely on Python passing unknown
            # escape sequences (\d) through unchanged in non-raw strings.
            page_num = re.findall(u"当前第\\d+/(\\d+)页", text)[0]
            return int(page_num)
        except Exception as e:
            self.log.error("获取页码数异常: ")
            self.log.exception(e)
        return -1

    def __get_page_data(self, page_count=0):
        """Fetch one listing page and crawl every organization on it.

        :param page_count: zero-based page index; pages > 0 add the
            "goto_page=next" pagination fields the site expects.
        :returns: total page count reported by the pager, or -1 when the
            POST itself failed.
        """
        post_data = copy.deepcopy(self.__POST_PARAMS)
        post_data['regDate'] = self.__before_date
        post_data['regDateEnd'] = self.__now_date
        if page_count > 0:
            # total_count just has to be large enough to keep the server
            # paging forward.
            post_data["goto_page"] = "next"
            post_data["total_count"] = "1000000"
        post_data["current_page"] = page_count
        resp = self._proxy_requests.post(self.__START_URL,
                                         data=post_data,
                                         proxy_type=ProxyType.KUNPENG_DYNAMIC)
        if resp is None:
            return -1

        # Each listing row links to its detail page via an inline
        # javascript:popOrgWin("<id>","<url-token>") call; de-duplicate.
        agency_infos = set(re.findall(r"javascript:popOrgWin\(\"(\d+)\",\"(.*?)\"\)", resp.text))

        for per_agency_info in agency_infos:
            self.__get_detail(per_agency_info)

        total_page = self.__get_total_page(resp.text)

        return total_page

    def __get_detail(self, agency_info):
        """Fetch and parse one organization's detail page.

        :param agency_info: (id, url-token) tuple extracted from the
            listing page's popOrgWin(...) call.
        """
        resp = self._proxy_requests.get(self.__DI_FANG_DETAIL_URL,
                                        params={"i": agency_info[0],
                                                "u": agency_info[1]},
                                        proxy_type=ProxyType.KUNPENG_DYNAMIC)
        if not resp:
            self.log.error("当前页面爬取异常... agency_info={}".format(agency_info))
            return

        self.__parse_detail(resp.text)

    @staticmethod
    def __extract_field(resp, label):
        """Return the stripped text of the <td> immediately following the
        <td> whose text contains *label*.

        Raises IndexError when the row is missing, which bubbles up to the
        per-page try/except in start() — a partially-parsed record is never
        stored.
        """
        xpath = u'//td[contains(text(), "{}")]/following-sibling::td[1]/text()'.format(label)
        return resp.xpath(xpath)[0].strip()

    def __parse_detail(self, content):
        """Parse an organization detail page and store the resulting model.

        :param content: detail-page HTML.
        """
        resp = html.fromstring(content)
        # Title has the form "<name>统一社会信用代码：<code>". The separator
        # must be a unicode literal: splitting the unicode title with a
        # UTF-8 bytes literal raises UnicodeDecodeError under Python 2.
        title = "".join(resp.xpath("//div[@class='title_bg']/h3/text()"))
        name = title.split(u"统一社会信用代码：")[0].strip().replace("&nbsp", "")
        unified_social_credit_code = title.split(u"统一社会信用代码：")[1].strip().replace("&nbsp", "")
        registered_address = self.__extract_field(resp, u"登记管理机关")
        business_manage_unit = self.__extract_field(resp, u"业务主管单位")
        legal_man = self.__extract_field(resp, u"法定代表人")
        registered_date = self.__extract_field(resp, u"成立登记日期")
        business_status = self.__extract_field(resp, u"登记状态")
        src_registered_capital = self.__extract_field(resp, u"注册资金")
        # "http://" is the site's placeholder for "no website".
        website_raw = self.__extract_field(resp, u"网")
        website = website_raw if website_raw != "http://" else None
        # "-" is the site's placeholder for "no phone number".
        phone_raw = self.__extract_field(resp, u"联系电话")
        phone = phone_raw if phone_raw != "-" else None
        social_organization_type = self.__extract_field(resp, u"社会组织类型")
        period = self.__extract_field(resp, u"证书有效期")
        address = self.__extract_field(resp, u"住")
        business_scope = self.__extract_field(resp, u"业务范围")

        model = {
            "company": name,
            "unified_social_credit_code": unified_social_credit_code,
            "registered_address": registered_address,
            "business_manage_unit": business_manage_unit,
            "legal_man": legal_man,
            "registered_date": registered_date,
            "business_status": self.__BUSINESS_STATUS_MAP.get(business_status),
            "src_registered_capital": src_registered_capital,
            "website": website,
            "phone": phone,
            "period": period,
            "business_scope": business_scope,
            "social_organization_type": social_organization_type,
            "address": address,
        }

        in_time = int(time.time())
        self.parse_base_worker.store_model(name, self.__DI_FANG_DETAIL_URL, in_time, model)

    def start(self, *args, **kwargs):
        """Crawl all listing pages. Page 0 is fetched first and also yields
        the total page count used to drive the loop."""
        total_page = self.__get_page_data(page_count=0)
        if total_page <= 0:
            self.log.warn("获取页码数信息异常，不进行遍历: total_page = {}".format(total_page))
            return

        # Page 0 was already crawled above; iterate the remaining pages.
        for page in xrange(1, total_page):
            try:
                self.log.info("当前采集页面: page = {}".format(page + 1))
                self.__get_page_data(page)
                self.log.info("当前页面采集完成: page = {}".format(page + 1))
            except Exception as e:
                self.log.error("当前页面采集失败: page = {}".format(page + 1))
                self.log.exception(e)

        self.log.info("成功退出采集程序...")


@click.command()
@click.option('--before_date',
              default=7,
              type=int,
              help='采集截止时间')
def main(before_date):
    """CLI entry point: build the crawler for the given look-back window
    and invoke it (the task instance is called directly); any exception
    escaping the run is logged and swallowed."""
    try:
        crawler = ChinanpoGsxtDifang(before_date, logger)
        crawler()
    except Exception as e:
        logger.error("采集异常退出: ")
        logger.exception(e)


if __name__ == '__main__':
    # click parses the command-line options and dispatches to main().
    main()
