#!/usr/bin/env python 
# coding:utf-8
# @Time :11/5/18 11:30

import copy
import json
import re
import sys

import click
from lxml import html

sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from proxy import ProxyType

from base import packet

from common import PyBeanstalk
from common import DateUtil

from ext import TaskBase
from logger import AppLogger
from mq import MQFactory
from config.mq_conf import OfflineBeanstalkConf

from libs import thriftobj2bytes

from config.mq_conf import TOPICS

# Module-level logger; writes to hnaic_penalty.log via the project's AppLogger wrapper.
logger = AppLogger('hnaic_penalty.log').get_logger()


class HnaicPenalty(TaskBase):
    """Incremental crawler for administrative-penalty announcements on the
    Hunan AIC site (gsxt.hnaic.gov.cn).

    Listing pages are fetched in order starting from page 1; for every entry
    a detail page is fetched, parsed, and pushed to a beanstalk tube for
    offline extraction.  Crawling stops as soon as an entry dated on or
    before the configured cut-off date is seen (the listing appears to be
    ordered newest-first -- assumption inherited from the original logic).
    """

    # Listing endpoint (POST, paged via the "cp" form field).
    __START_URL = "http://gsxt.hnaic.gov.cn:8009/xzcf/CaseDate"
    # Detail endpoint (GET, takes "date" and "id" query parameters).
    __DETAIL_URL = "http://gsxt.hnaic.gov.cn:8009/xzcf/CaseList"

    __HOST = "gsxt.hnaic.gov.cn"

    # Template for the listing POST body; "cp" is the 1-based page number.
    __START_POST_DATA = {
        "cp": None
    }

    def __init__(self, before_date, log):
        """Set up MQ handlers, compute the cut-off date and prime headers.

        :param before_date: how many days back to crawl; entries dated on or
            before this cut-off stop the crawl.
        :param log: logger instance shared with the task framework.
        """
        super(HnaicPenalty, self).__init__(log)
        self._reset_beanstalk_handler(MQFactory.get_gsxt_clue_beanstalk_handler(log))
        self.__before_date = DateUtil.get_before_day(before_date)
        self.log.info("获得 {} 之后的数据...".format(self.__before_date))

        # Separate beanstalk connection used to emit parsed penalty items.
        self.beanstalk = PyBeanstalk(OfflineBeanstalkConf.HOST, OfflineBeanstalkConf.PORT)

        self.__set_headers()

    def __set_headers(self):
        """Install browser-like request headers on the proxy requester."""
        headers = {
            "Host": self.__HOST,
            "Origin": "http://" + self.__HOST,
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6",
        }
        self._proxy_requests.set_header(headers)

    def __get_total_page(self, html_resp):
        """Extract the total page count from the "尾页" (last page) button.

        :param html_resp: parsed lxml document of a listing page.
        :return: total page count, or -1 when it cannot be determined.
        """
        try:
            page_num_href = html_resp.xpath(u'//input[@value="尾页"]/@onclick')[0]
            # The onclick handler looks like "go(N)"; N is the last page.
            page_num = re.findall(r"go\((\d+)\)", page_num_href)[0]
            return int(page_num)
        except Exception as e:
            self.log.error("获取页码数异常: ")
            self.log.exception(e)
        return -1

    def __get_page_data(self, page_count=0):
        """Fetch one listing page and crawl every detail entry on it.

        :param page_count: 1-based listing page number to request.
        :return: -1 on fetch failure, 0 when the date cut-off was reached
            (caller should stop), otherwise the listing's total page count.
        """
        start_post_data = copy.deepcopy(self.__START_POST_DATA)
        start_post_data["cp"] = page_count

        resp = self._proxy_requests.post(self.__START_URL, data=start_post_data,
                                         proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                         timeout=30)
        if resp is None:
            return -1

        html_resp = html.fromstring(resp.text)

        # Each entry's onclick carries (date, case-id) for the detail request.
        detail_info_regex = re.findall(r'onclick="getCaseList\(\'(.*?)\',\'(\d+)\'\)"', resp.text)

        for per_info in detail_info_regex:
            detail_params = {
                "date": per_info[0],
                "id": per_info[1]
            }
            grab_detail_status = self.__get_detail(detail_params)
            # BUGFIX: test for None *before* the numeric comparison.  The
            # original order relied on Python 2's arbitrary None-vs-int
            # ordering and would raise TypeError under Python 3.
            if grab_detail_status is not None and grab_detail_status < 0:
                self.log.warn("当前爬取详情页异常，请校验...")

            # Entries past the cut-off date mean nothing newer remains.
            if per_info[0] <= self.__before_date:
                return 0

        total_page = self.__get_total_page(html_resp)
        return total_page

    def __get_detail(self, detail_params):
        """Fetch one detail page and enqueue every penalty record on it.

        :param detail_params: dict with "date" and "id" query parameters.
        :return: 0 on success, -1 on fetch failure, -2 on parse failure.
        """
        resp = self._proxy_requests.get(self.__DETAIL_URL, params=detail_params,
                                        proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                        timeout=30)
        if resp is None:
            return -1

        html_resp = html.fromstring(resp.text)

        detail_info_extract = html_resp.xpath("/html/body/center/table//tr")

        # First row is the table header; at least one data row is expected.
        if len(detail_info_extract) < 2:
            self.log.warn("当前解析的详情页面信息有误，请求参数为={}".format(detail_params))
            return -2

        for per_info in detail_info_extract[1:]:
            accused_name        = per_info.xpath('./td/span/text()')[0].strip()
            title               = per_info.xpath('./td[2]/@title')[0]
            execute_authority   = per_info.xpath('./td[3]/text()')[0].strip()
            accused_people      = [accused_name]

            item = {
                "province": "湖南",
                "penalty_time": detail_params["date"],
                "execute_authority": execute_authority,
                "title": "{}({})".format(title, detail_params["date"]),
                "accused_people": accused_people,
                "accused_name": accused_name,
                "case_cause": title,
                "_site_record_id": self.__HOST,
            }

            self.log.info("accused_people={}".format(accused_people))

            # Serialize into the offline-extraction tube as a thrift packet.
            self.beanstalk.put(OfflineBeanstalkConf.OFFLINE_EXTRACT_INFO_TUBE,
                               thriftobj2bytes(packet(topic_id=TOPICS['penalty'], url=self.__DETAIL_URL, data=item))
                               )

        # BUGFIX: explicit success status -- the original fell off the end and
        # returned None, forcing callers to special-case it.
        return 0

    def start(self, *args, **kwargs):
        """Entry point invoked by the task framework: crawl page 1 to discover
        the total page count, then walk the remaining pages until the date
        cut-off is hit or the last page is processed."""
        self.log.info("开始采集程序...")
        total_page = self.__get_page_data(page_count=1)
        if total_page < 0:
            self.log.warn("获取页码数信息异常，不进行遍历: total_page = {}".format(total_page))
            return

        if total_page == 0:
            self.log.info("已经采集到截止日期，停止采集")
            return

        for page in xrange(2, total_page + 1):
            try:
                self.log.info("当前采集页面: page = {}".format(page))
                result = self.__get_page_data(page_count=page)
                if result < 0:
                    self.log.warn("获取页码数信息异常，不进行遍历: total_page = {}".format(result))
                    continue

                self.log.info("当前页面采集完成: page = {}".format(page))

                if result == 0:
                    self.log.info("已经采集到截止日期，停止采集")
                    return

            # Per-page failures are logged and skipped so one bad page does
            # not abort the whole crawl.
            except Exception as e:
                self.log.error("当前页面采集失败: page = {}".format(page))
                self.log.exception(e)

        self.log.info("成功退出采集程序...")


@click.command()
@click.option('--before_date', default=15, type=int, help='采集截止时间')
def main(before_date):
    """CLI entry point: run the Hunan penalty crawler, logging any failure."""
    try:
        task = HnaicPenalty(before_date, logger)
        task()
    except Exception as e:
        logger.error("采集异常退出: ")
        logger.exception(e)


if __name__ == '__main__':
    main()
