#!/usr/bin/env python 
# coding:utf-8
# @Time :11/7/18 10:56


import copy
import json
import re
import sys
import urllib

import click
from lxml import html

sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from proxy import ProxyType

from base import packet

from common import PyBeanstalk
from common.tools import get_md5

from ext import TaskBase
from logger import AppLogger
from mq import MQFactory
from config.mq_conf import OfflineBeanstalkConf

from libs import thriftobj2bytes

from config.mq_conf import TOPICS

# Module-level logger; AppLogger is a project wrapper around stdlib logging.
logger = AppLogger('ssfw_common_zhixing.log').get_logger()



# Per-area site config: area name -> [URL template, Host header value].
# The template's {} slot is filled with "" for the list page and "view"
# for the detail page (see SsfwCommonZhixing usage below).
init_site_info_map = {
    "肇庆市端州区": ["http://ssfw.zqdzfy.gov.cn/zxxx{}.aspx", "ssfw.zqdzfy.gov.cn"],
    "肇庆市怀集县": ["http://ssfw.gdhjfy.gov.cn/zxxx{}.aspx", "ssfw.gdhjfy.gov.cn"]
}


class SsfwCommonZhixing(TaskBase):
    """Crawler for executed-person ("被执行人") case records on the shared
    "ssfw" court-service sites (Zhaoqing district courts).

    Workflow: fetch a paginated list page, extract each row's case id from
    its onclick attribute, fetch the matching detail page and assemble one
    record dict per case.
    """

    # Base query-string parameters shared by list and detail requests.
    __GET_PARAMS = {
        "cateId": 21,
    }

    def __init__(self, page_need_spider, area, log):
        """
        :param page_need_spider: number of list pages to crawl; 0 means all.
        :param area: key into ``init_site_info_map`` selecting the site.
        :param log: logger instance, forwarded to TaskBase.
        """
        super(SsfwCommonZhixing, self).__init__(log)

        self.log.info("此时正在初始化 {} 的信息".format(area))

        self.page_need_spider = page_need_spider
        self.log.info("获得 {} 页的数据...".format(self.page_need_spider if self.page_need_spider != 0 else "全量"))
        self.beanstalk = PyBeanstalk(OfflineBeanstalkConf.HOST, OfflineBeanstalkConf.PORT)
        self.__start_url_demo = init_site_info_map[area][0]
        self.__host = init_site_info_map[area][1]
        self.__set_headers()

        self.log.info("信息初始化完毕")

    def __set_headers(self):
        """Install browser-like request headers on the shared proxy session."""
        headers = {
            "Host": self.__host,
            "Origin": "http://" + self.__host,
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6",
        }
        self._proxy_requests.set_header(headers)

    def __get_total_page(self, html_resp):
        """Extract the last-page number from the pagination link.

        Returns the configured cap (``page_need_spider``) when it is non-zero,
        the site's real page count otherwise, or -1 on parse failure.
        """
        try:
            page_num_href = html_resp.xpath('//a[@class="lastpage"]/@href')[0]
            # raw string so \d is a regex digit class, not an escape
            page_num = re.findall(r".*page=(\d+)", page_num_href)[0]
            return int(page_num) if self.page_need_spider == 0 else self.page_need_spider
        except Exception as e:
            self.log.error("获取页码数异常: ")
            self.log.exception(e)
        return -1

    def __get_page_data(self, page_count=1):
        """Crawl one list page and every detail page it references.

        :param page_count: 1-based page number; page 1 omits the param.
        :returns: total page count on success, -1 on fetch failure.
        """
        get_params = copy.deepcopy(self.__GET_PARAMS)
        if page_count > 1:
            get_params["page"] = page_count

        resp = self._proxy_requests.get(self.__start_url_demo.format(""), params=get_params,
                                        proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                        timeout=10)
        if resp is None:
            return -1

        html_resp = html.fromstring(resp.text)

        # [1:] skips the table's header row.
        info_extract = html_resp.xpath("//table[@id='tbData']//tr")[1:]

        for per_info_extract in info_extract:
            # Each row's onclick contains the case id as an "ajid" parameter.
            detail_id_raw = per_info_extract.xpath('./@onclick')[0]
            detail_id = re.findall(r".*?ajid=(\d+)", detail_id_raw)[0]

            self.__get_detail(detail_id)

        # BUGFIX: this return was commented out, so the method returned None
        # and start() (None < 0 is True in Py2) always stopped after page 1.
        return self.__get_total_page(html_resp)

    def __get_detail(self, detail_id):
        """Fetch one case detail page, extract its fields and emit the record.

        :param detail_id: the site's "ajid" case identifier.
        :returns: -1 on fetch failure, None otherwise.
        """
        detail_params = copy.deepcopy(self.__GET_PARAMS)
        detail_params['ajid'] = detail_id

        # BUGFIX: built up-front so the error log below can use it; the
        # original referenced detail_url before assignment on fetch failure.
        detail_url = "?".join([self.__start_url_demo.format("view"), urllib.urlencode(detail_params)])

        resp = self._proxy_requests.get(self.__start_url_demo.format("view"),
                                        params=detail_params,
                                        proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                        timeout=10)

        if resp is None:
            self.log.error("当前页面爬取异常... detail_url={}".format(detail_url))
            return -1

        html_resp = html.fromstring(resp.text)

        # Each field sits in the <td> following its label cell.
        case_id = html_resp.xpath(u'//td[text()="案号："]/following-sibling::td[1]/text()')[0].strip()
        case_date = html_resp.xpath(u'//td[text()="立案时间："]/following-sibling::td[1]/text()')[0].strip()
        case_state = html_resp.xpath(u'//td[text()="案件状态："]/following-sibling::td[1]/text()')[0].strip()
        court = html_resp.xpath('//div[@class="footer_left"]/a/text()')[0].strip()

        # Parties are separated by double spaces; keep only the executed
        # persons ("被执行人") and strip the role prefix.
        party_raw = html_resp.xpath(u'//td[text()="当事人："]/following-sibling::td[1]/text()')[0].split("  ")

        party = ",".join(map(lambda y: y.replace(u"被执行人：", ""), filter(lambda x: u"被执行人：" in x, party_raw)))

        item = {
            "province": "广东",
            "court": court,
            "case_id": case_id,
            "i_name": party,
            "case_date": case_date,
            "case_state": case_state,
            "_site_record_id": case_id,
            "unique_id": detail_url
        }

        # Parenthesized single-arg print behaves identically under Python 2
        # and keeps the block parseable by Python 3 tooling.
        print(json.dumps(item, ensure_ascii=False))

        # MQ publishing intentionally left disabled (debug/dry-run state);
        # re-enable to push records to the offline extract tube.
        #     self.beanstalk.put(OfflineBeanstalkConf.OFFLINE_EXTRACT_INFO_TUBE,
        #                        thriftobj2bytes(packet(topic_id=TOPICS['zhixing_info'], url=detail_url, data=item))
        #                        )

    def start(self, *args, **kwargs):
        """Crawl page 1 to discover the page count, then the remaining pages."""
        self.log.info("开始采集程序...")

        total_page = self.__get_page_data(page_count=1)
        if total_page < 0:
            self.log.warn("获取页码数信息异常，不进行遍历: total_page = {}".format(total_page))
            return

        # BUGFIX: page 1 was already crawled above; start from page 2 to
        # avoid emitting every record on page 1 twice.
        for page in xrange(2, total_page + 1):
            try:
                self.log.info("当前采集页面: page = {}".format(page))
                result = self.__get_page_data(page_count=page)
                if result < 0:
                    self.log.warn("获取页码数信息异常，不进行遍历: total_page = {}".format(result))
                    continue
                self.log.info("当前页面采集完成: page = {}".format(page))
            except Exception as e:
                self.log.error("当前页面采集失败: page = {}".format(page))
                self.log.exception(e)

        self.log.info("成功退出采集程序...")


@click.command()
@click.option('--page_need_spider',
              # 0 means crawl every available page
              default=0,
              type=int,
              help='采集截止页数')
def main(page_need_spider):
    """CLI entry point: run the crawler once per configured court area.

    A failure in one area is logged and does not stop the others.
    """
    areas = ["肇庆市怀集县", "肇庆市端州区"]
    for area in areas:
        try:
            task = SsfwCommonZhixing(page_need_spider, area, logger)
            task()
        except Exception as err:
            logger.error("采集异常退出: ")
            logger.exception(err)


if __name__ == '__main__':
    # click parses CLI options and invokes main(page_need_spider=...)
    main()