#!/usr/bin/env python 
# coding:utf-8
# @Time :11/3/18 16:03

import copy
import json
import re
import sys
import time
import string

import click
from lxml import html

sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from proxy import ProxyType

from base import packet

from common import PyBeanstalk

from ext import TaskBase
from logger import AppLogger

from config.mq_conf import OfflineBeanstalkConf

from libs import thriftobj2bytes

from config.mq_conf import TOPICS

from retrying import retry

from service.img_recognition_service import BaiduImgRecognitionService

# Module-level application logger; all crawler output goes to gdcourts_zhixing.log.
logger = AppLogger('gdcourts_zhixing.log').get_logger()


class GdcourtsZhixing(TaskBase):
    """Crawler for enforcement ("执行") case records on the Guangdong High
    People's Court search portal (www.gdcourts.gov.cn).

    High-level flow, driven by ``start``:
      1. ``__init_captcha_token`` loads the landing page, solves two captcha
         images (the site serves two; only the second is the search captcha)
         and scrapes the session ``tokenKey`` from the page source;
      2. ``__get_page_data`` POSTs the search form for one result page and
         walks the case list in the JSON response;
      3. ``__get_detail_page`` fetches/parses each case detail page and
         pushes the extracted record onto a beanstalk tube for offline
         processing.

    NOTE: written for Python 2 (``xrange``, list-returning ``map``/``filter``).
    """

    # Target site host; also stamped into every emitted item.
    __HOST = "www.gdcourts.gov.cn"

    # -------------------------------------------- URLs --------------------------------------------

    # Search landing page; also the endpoint the search form is POSTed to.
    __START_URL = "http://www.gdcourts.gov.cn/web/search?action=gotoajxxcx"
    # Exchanges a tokenKey for a one-shot csToken.
    __TOKEN_URL = "http://www.gdcourts.gov.cn/common/getToKenTempPutCk"
    # Case detail page, keyed by case id (ajid) and the originating list page.
    __DETAIL_URL_DEMO = "http://www.gdcourts.gov.cn/web/search?action=ajxxxq&ajid={ajid}&ah=&dsr=&pageNum={page}"

    # Captcha image endpoint; the placeholder is a millisecond timestamp.
    __CAPTCHA_URL = "http://www.gdcourts.gov.cn/common/random_codeById/{}-"
    # NOTE(review): not referenced anywhere in this class — recognition goes
    # through BaiduImgRecognitionService instead; confirm before removing.
    __CAPTCHA_RECOGNITION_URL = "http://192.168.0.94:5555/captcha"

    # -------------------------------------------- request params --------------------------------------------

    # Template of the search POST body; the None slots are filled per request
    # in __get_page_data (the template itself is copied, never mutated).
    __SEARCH_FORM_DATA = {
        'page_randomcode': None,
        'pageNum': None,
        'fjm': 'J00',
        'token_key': None,
        'ajlx': 'zx',
        'page_randomcode_submit': None,
        'csToken': None
    }

    # Query string of the landing page ("zx" selects enforcement cases).
    __FIRST_PAGE_PARAMS = {
        "action": "gotoajxxcx",
        "ajlx": "zx",
        "flag": "first"
    }

    # -------------------------------------------- init --------------------------------------------

    def __init__(self, page_need_spider, log):
        """
        :param page_need_spider: number of result pages to crawl; 0 means
            "all pages" (the total is then taken from the site's response).
        :param log: logger shared with the surrounding task framework.
        """
        super(GdcourtsZhixing, self).__init__(log)
        self.page_need_spider = page_need_spider
        self.log.info("获得 {} 页的数据...".format(self.page_need_spider if self.page_need_spider != 0 else "全量"))
        self.beanstalk = PyBeanstalk(OfflineBeanstalkConf.HOST, OfflineBeanstalkConf.PORT)
        self.__set_headers()
        # Session state populated by __init_captcha_token().
        self.token_key = None          # tokenKey scraped from the landing page
        self.captcha = None            # recognized text of the search captcha
        self.__first_timestamp = None  # ms timestamp of the search-captcha request

        self.baidu_img_recg_service = BaiduImgRecognitionService(log)

    def __set_headers(self):
        """Install browser-like default headers on the shared proxy session."""
        headers = {
            "Host": self.__HOST,
            "Referer": "http://www.gdcourts.gov.cn/web/search?action=gotoajxxcx&ajlx=zx&flag=first",
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "image/webp,image/apng,image/*,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        }
        self._proxy_requests.set_header(headers)

    def __get_total_page(self, html_resp):
        """Read the total page count from a parsed HTML result page.

        NOTE(review): appears superseded by the JSON ``totalPage`` field used
        in ``__get_page_data`` — not called anywhere in this class; confirm
        before removing.

        :param html_resp: lxml document of a search result page.
        :return: total pages to crawl, or -1 on parse failure.
        """
        try:
            page_num = html_resp.xpath('//b[@id="bsumpage"]/text()')[0]
            # 0 means "crawl everything", i.e. trust the site's own total.
            return int(page_num) if self.page_need_spider == 0 else self.page_need_spider
        except Exception as e:
            self.log.error("获取页码数异常: ")
            self.log.exception(e)
        return -1

    def __init_captcha_token(self):
        """Prime the crawl session: load the landing page, solve the captcha
        pair and remember the ``tokenKey`` embedded in the page source."""
        resp = self._proxy_requests.get(self.__START_URL,
                                        params=self.__FIRST_PAGE_PARAMS,
                                        proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                        timeout=10)

        # The site issues two captchas; only the second is the actual search
        # captcha, so the first recognition result is discarded.
        self.__decode_captcha()
        self.captcha = str(self.__decode_captcha(use_twice=True))

        # Raw string so \d is a regex class, not a (merely py2-tolerated) escape.
        self.token_key = str(re.findall(r'.*tokenKey":"(\d+)"', resp.text)[0])

    def __get_page_data(self, page_count=1):
        """POST the search form for one result page and crawl every case on it.

        :param page_count: 1-based result page number to request.
        :return: total number of pages to crawl, or
                 -1 when the request failed, or
                 -2 when the site rejected the captcha.
        """
        # Shallow copy is enough: the template is a flat dict of plain values.
        search_form_data = dict(self.__SEARCH_FORM_DATA)
        cs_token = str(self.__get_token())

        search_form_data["csToken"] = cs_token
        search_form_data["pageNum"] = str(page_count)
        search_form_data["token_key"] = self.token_key
        # Captcha text is submitted upper-cased, with the timestamp of the
        # image request echoed back in page_randomcode_submit.
        search_form_data["page_randomcode"] = self.captcha.upper()
        search_form_data["page_randomcode_submit"] = str(self.__first_timestamp)

        resp = self._proxy_requests.post(self.__START_URL,
                                         data=search_form_data,
                                         proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                         timeout=10)
        if resp is None:
            return -1

        payload = resp.json()  # parse the body once instead of per access
        if payload.get("msg") == "验证码错误！":
            return -2

        page_info = payload["pageinfo"]
        for per_case_info in page_info["list"]:
            ajid = per_case_info.get("AJID")
            detail_url = self.__DETAIL_URL_DEMO.format(ajid=ajid, page=page_count)
            self.__get_detail_page(detail_url)

        total_page = int(page_info["totalPage"]) if self.page_need_spider == 0 else self.page_need_spider
        return total_page

    def __get_detail_page(self, detail_url):
        """Fetch one case detail page, extract its fields and enqueue the
        record onto the offline-extract beanstalk tube.

        :param detail_url: absolute URL of the case detail page.
        :return: -1 on fetch/parse failure, None on success.
        """
        resp = self._proxy_requests.get(detail_url,
                                        proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                        timeout=10)

        if resp is None:
            return -1

        html_resp = html.fromstring(resp.text)

        # The detail table lives inside div#a1; its absence means the page is
        # not a usable detail view.
        info_valid = html_resp.xpath("//div[@id='a1']")

        if not info_valid:
            self.log.warn("解析详情页信息失败，当前detail={}".format(detail_url))
            return -1

        case_id = html_resp.xpath('//h2/text()')[0].strip()
        case_date = html_resp.xpath(u"//td[text()='立案日期']/following-sibling::td[1]/text()")[0]
        case_state = html_resp.xpath(u"//td[text()='案件进度']/following-sibling::td[1]/text()")[0]
        exec_money_raw = html_resp.xpath(u"//td[text()='标的金额']/following-sibling::td[1]/text()")[0].replace("元", "")
        exec_money = float(exec_money_raw) if exec_money_raw else None

        i_name_raws = html_resp.xpath(u"//td[text()='当事人']/following-sibling::td[1]/text()")

        if i_name_raws:
            # Prefer the names listed after the "被执行人:" (judgment debtor)
            # marker.  u"" is byte-identical to the former ur"" literal here
            # (no backslashes) and keeps the file parseable by Python 3 tools.
            i_name_raws_hand = filter(lambda x: len(x) > 0,
                                      map(lambda x: re.findall(u".*被执行人:(.*)", x), i_name_raws))
            i_name_parse = ",".join(map(lambda x: x[0], i_name_raws_hand))
            # No "被执行人:" marker present: fall back to the raw party list,
            # stripped of layout whitespace.
            remove_regex = re.compile(r'[\n\r\t]')
            i_name = i_name_parse if i_name_parse else ",".join(
                filter(lambda y: y != " ", map(lambda x: remove_regex.sub('', x), i_name_raws)))
        else:
            i_name = None

        item = {
            "province": "广东",
            "court": "广东省高级人民法院",
            "case_id": case_id,
            "i_name": i_name,
            "case_date": case_date,
            "case_state": case_state,
            "exec_money": exec_money,
            "money_unit": "元",
            "_site_record_id": self.__HOST,
            "unique_id": detail_url
        }

        self.beanstalk.put(OfflineBeanstalkConf.OFFLINE_EXTRACT_INFO_TUBE,
                           thriftobj2bytes(packet(topic_id=TOPICS['zhixing_info'], url=detail_url, data=item))
                           )

    def __get_token(self):
        """Exchange the session ``token_key`` for a one-shot ``csToken``.

        :return: the ``tokenVal`` value from the token endpoint (may be None).
        """
        token_resp = self._proxy_requests.post(self.__TOKEN_URL,
                                               data={"tokenKey": self.token_key},
                                               proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                               timeout=10  # consistent with every other request here
                                               )

        return token_resp.json().get('tokenVal')

    def __decode_captcha(self, retry_times=0, use_twice=False):
        """Fetch and OCR one captcha image.

        :param retry_times: vestigial counter — only logged; actual retrying
            happens via the @retry decorator on __handle_company_name.
        :param use_twice: True for the second (search) captcha; that request's
            timestamp is then remembered for the search form submission.
        :return: the recognized 4-character captcha text.
        """
        retry_times += 1
        self.log.info("__decode_captcha try {} times".format(retry_times))
        time_now = GdcourtsZhixing.get_current_time()
        # Only the search captcha's timestamp is echoed back in the form.
        self.__first_timestamp = time_now if use_twice else None
        url = self.__CAPTCHA_URL.format(time_now)
        return self.__handle_company_name(url)

    @retry(stop_max_attempt_number=7)
    def __handle_company_name(self, url):
        """Download the captcha image at ``url`` and OCR it via Baidu.

        :raises ValueError: when the OCR result is not exactly 4 characters;
            @retry then fetches a fresh image, up to 7 attempts.
        """
        img_resp = self._proxy_requests.get(url, proxy_type=ProxyType.KUNPENG_DYNAMIC)
        img_look = self.baidu_img_recg_service.discern(img_resp.content).replace(" ", "")
        if len(img_look) != 4:
            # was: "raise BaseException" — raised the bare, overly-broad base
            # class; a concrete Exception subclass triggers @retry just the same.
            raise ValueError("captcha OCR returned {!r}, expected 4 chars".format(img_look))
        return img_look

    @staticmethod
    def get_current_time():
        """Current Unix time in whole milliseconds."""
        return int(round(time.time() * 1000))

    @retry(stop_max_attempt_number=7)
    def start(self, *args, **kwargs):
        """Entry point: initialize the captcha/token session, crawl page 1,
        then iterate the remaining pages.

        A rejected captcha on page 1 raises so that @retry restarts the whole
        session (fresh captcha + token), up to 7 attempts.
        """
        self.log.info("开始采集程序...")

        self.log.info("开始采集并初始化验证码请求模块...")
        self.__init_captcha_token()

        total_page = self.__get_page_data(page_count=1)

        if total_page == -2:
            self.log.warn("验证码提取错误...")
            # was: "raise BaseException" — a plain Exception is all @retry
            # needs and avoids raising the bare BaseException class.
            raise Exception("captcha rejected on first page")

        if total_page < 0:
            self.log.warn("获取页码数信息异常，不进行遍历: total_page = {}".format(total_page))
            return

        for page in xrange(2, total_page + 1):
            try:
                self.log.info("当前采集页面: page = {}".format(page))
                result = self.__get_page_data(page_count=page)
                if result < 0:
                    # NOTE(review): a -2 (captcha rejected mid-run) only skips
                    # this page; consider re-initializing the session instead.
                    self.log.warn("获取页码数信息异常，不进行遍历: total_page = {}".format(result))
                    continue
                self.log.info("当前页面采集完成: page = {}".format(page))

            except Exception as e:
                self.log.error("当前页面采集失败: page = {}".format(page))
                self.log.exception(e)

        self.log.info("成功退出采集程序...")


@click.command()
@click.option('--page_need_spider',
              default=15,  # 0 crawls the full dataset
              type=int,
              help='采集截止页数')
def main(page_need_spider):
    """CLI entry point: build the crawler task and run it, logging any crash
    at this top-level boundary instead of letting it propagate."""
    try:
        task = GdcourtsZhixing(page_need_spider, logger)
        task()
    except Exception as err:
        logger.error("采集异常退出: ")
        logger.exception(err)


# Script entry point guard: run the click command only when executed directly.
if __name__ == '__main__':
    main()
