#!/usr/bin/env python 
# coding:utf-8
# @Time :11/2/18 17:43


import copy
import json
import re
import sys

import click
from lxml import html

sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from proxy import ProxyType

from base import packet

from common import PyBeanstalk
from common.tools import get_md5

from ext import TaskBase
from logger import AppLogger
from mq import MQFactory
from config.mq_conf import OfflineBeanstalkConf

from libs import thriftobj2bytes

from config.mq_conf import TOPICS

# Module-level logger writing to yjjcfy_zhixing.log; shared by the CLI entry point below.
logger = AppLogger('yjjcfy_zhixing.log').get_logger()


class YjjcfyZhixing(TaskBase):
    """Crawler for enforcement-case ("被执行人") notices published by the
    Yangjiang Jiangcheng District People's Court (www.yjjcfy.gov.cn).

    Each row of the paginated listing is parsed into an item dict and
    pushed onto the offline beanstalk extraction tube. Pagination is
    driven by the "lastpage" link on the listing page, optionally capped
    by the ``page_need_spider`` constructor argument (0 = crawl all).
    """

    # Listing page; every page uses this URL, only the GET params differ.
    __START_URL = "http://www.yjjcfy.gov.cn/index.aspx"
    # NOTE(review): not referenced anywhere in this class — looks like a
    # leftover from a copied template; confirm before removing.
    __MIN_ZHENG_DETAIL_URL = "http://www.chinanpo.gov.cn/search/vieworg.html"

    __HOST = "www.yjjcfy.gov.cn"

    # Base query for the enforcement-notice column; "page" is overwritten
    # per request in __get_page_data.
    __GET_PARAMS = {
        "lanmuid": 62,
        "sublanmuid": 615,
        "page": 1
    }

    def __init__(self, page_need_spider, log):
        """:param page_need_spider: number of pages to crawl; 0 means all.
        :param log: logger instance shared with the task framework.
        """
        super(YjjcfyZhixing, self).__init__(log)
        self._reset_beanstalk_handler(MQFactory.get_gsxt_clue_beanstalk_handler(log))
        self.page_need_spider = page_need_spider
        self.log.info("获得 {} 页的数据...".format(self.page_need_spider if self.page_need_spider != 0 else "全量"))

        self.beanstalk = PyBeanstalk(OfflineBeanstalkConf.HOST, OfflineBeanstalkConf.PORT)

        self.__set_headers()

    def __set_headers(self):
        """Install browser-like default headers on the shared proxy-requests session."""
        headers = {
            "Host": self.__HOST,
            "Origin": "http://" + self.__HOST,
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6",
        }
        self._proxy_requests.set_header(headers)

    def __get_total_page(self, html_resp):
        """Return the number of pages to crawl, derived from the "lastpage"
        link, or ``self.page_need_spider`` when a cap was requested.

        Returns -1 when the pagination link is missing or unparseable.
        """
        try:
            page_num_href = html_resp.xpath('//a[@class="lastpage"]/@href')[0]
            # raw string: "\d" in a non-raw literal is an invalid escape
            # sequence (SyntaxWarning/DeprecationWarning on modern Python)
            page_num = re.findall(r".*page=(\d+)", page_num_href)[0]
            return int(page_num) if self.page_need_spider == 0 else self.page_need_spider
        except Exception as e:
            self.log.error("获取页码数异常: ")
            self.log.exception(e)
        return -1

    def __build_and_push_item(self, per_info_extract):
        """Parse one listing-table row and push the resulting item to the
        offline extraction tube. Raises IndexError on a malformed row.
        """
        case_id = per_info_extract.xpath('./td[1]/a/text()')[0].strip()

        party_person = per_info_extract.xpath('./td[2]//text()')
        i_name = party_person[0].replace("被执行人：", "")
        doc_content = " ".join(party_person)
        case_date = per_info_extract.xpath('./td[5]/text()')[0].strip()
        case_state = per_info_extract.xpath('./td[6]/text()')[0].strip()

        case_id_md5 = get_md5(case_id)

        # The site has no real per-case detail page; a synthetic unique URL
        # is built from the case-id hash for downstream de-duplication.
        detail_url = self.__START_URL + "?" + case_id_md5

        item = {
            "province": "广东",
            "court": "阳江市江城区人民法院",
            "case_id": case_id,
            "i_name": i_name,
            "doc_content": doc_content,
            "case_date": case_date,
            "case_state": case_state,
            "_site_record_id": case_id_md5,
            "unique_id": detail_url
        }

        self.beanstalk.put(OfflineBeanstalkConf.OFFLINE_EXTRACT_INFO_TUBE,
                           thriftobj2bytes(packet(topic_id=TOPICS['zhixing_info'], url=detail_url, data=item))
                           )

    def __get_page_data(self, page_count=0):
        """Fetch one listing page, push every row to beanstalk, and return
        the total page count parsed from the page (-1 on fetch failure).
        """
        # Shallow copy is sufficient: __GET_PARAMS is a flat dict of scalars.
        get_params = dict(self.__GET_PARAMS)
        get_params["page"] = page_count

        resp = self._proxy_requests.get(self.__START_URL, params=get_params,
                                        proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                        timeout=10)
        if resp is None:
            return -1

        html_resp = html.fromstring(resp.text)

        for per_info_extract in html_resp.xpath("//tr[contains(@class, 'item')]"):
            try:
                self.__build_and_push_item(per_info_extract)
            except IndexError as e:
                # One malformed row must not abort the rest of the page.
                self.log.error("解析行数据异常: page = {}".format(page_count))
                self.log.exception(e)

        return self.__get_total_page(html_resp)

    def start(self, *args, **kwargs):
        """Crawl page 1 to learn the total page count, then iterate the
        remaining pages, logging (but not aborting on) per-page failures.
        """
        self.log.info("开始采集程序...")
        total_page = self.__get_page_data(page_count=1)
        if total_page < 0:
            # logging.warn is a deprecated alias for warning
            self.log.warning("获取页码数信息异常，不进行遍历: total_page = {}".format(total_page))
            return

        # Page 1 was already collected (and pushed) by the call above;
        # starting the loop at 1 would push its rows a second time.
        for page in xrange(2, total_page + 1):
            try:
                self.log.info("当前采集页面: page = {}".format(page))
                result = self.__get_page_data(page_count=page)
                if result < 0:
                    self.log.warning("获取页码数信息异常，不进行遍历: total_page = {}".format(result))
                    continue
                self.log.info("当前页面采集完成: page = {}".format(page))
            except Exception as e:
                self.log.error("当前页面采集失败: page = {}".format(page))
                self.log.exception(e)

        self.log.info("成功退出采集程序...")


@click.command()
@click.option('--page_need_spider',
              default=0,  # 0 = crawl the full data set
              type=int,
              help='采集截止页数')
def main(page_need_spider):
    """CLI entry point: build the crawler task and run it, logging any
    exception instead of letting it propagate to the shell.
    """
    try:
        task = YjjcfyZhixing(page_need_spider, logger)
        task()
    except Exception as exc:
        logger.error("采集异常退出: ")
        logger.exception(exc)


if __name__ == '__main__':
    main()
