#!/usr/bin/env python 
# coding:utf-8
# @Time :11/19/18 15:00

import copy
import json
import re
import sys

import click
from lxml import html

sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from proxy import ProxyType

from base import packet

from common import PyBeanstalk

from ext import TaskBase
from logger import AppLogger

from config.mq_conf import OfflineBeanstalkConf

from libs import thriftobj2bytes

from config.mq_conf import TOPICS

# Module-level logger; all output goes to gdcourts_wenshu.log.
logger = AppLogger('gdcourts_wenshu.log').get_logger()


class GdcourtsWenshu(TaskBase):
    """Spider for judgement documents ("裁判文书") on www.gdcourts.gov.cn.

    Workflow: load the start page to obtain an initial ``tokenKey``,
    exchange it for a per-request ``csToken``, POST through the paginated
    search listing, fetch each detail page, and push every assembled
    record onto the offline-extraction beanstalk tube.
    """

    # Site host, used in the default request headers.
    __HOST = "www.gdcourts.gov.cn"

    # -------------------------------------------- urls --------------------------------------------

    # Landing page; its HTML embeds the initial token key.
    __START_URL = "http://www.gdcourts.gov.cn/web/cpws?action=gotowsxxcx&flag=first"
    # Exchanges the stored token key for a fresh csToken.
    __TOKEN_URL = "http://www.gdcourts.gov.cn/common/getToKenTempPutCk"

    # Paginated search listing (POST) and detail-page template ({} = WSID).
    __LIST_URL = "http://www.gdcourts.gov.cn/web/cpws?action=gotowsxxcx"
    __DETAIL_URL_DEMO = "http://www.gdcourts.gov.cn/web/cpws?action=wsxxxq&wsid={}&fjm=J00"

    # -------------------------------------------- params --------------------------------------------

    # Form template for the listing POST; the None slots are filled per request.
    __SEARCH_FORM_DATA = {
        'pageNum': None,
        'fjm': 'J00',
        'token_key': None,
        'csToken': None
    }

    def __init__(self, page_need_spider, log):
        """
        :param page_need_spider: number of listing pages to crawl (0 = all pages).
        :param log: logger instance, passed through to TaskBase.
        """
        super(GdcourtsWenshu, self).__init__(log)
        self.page_need_spider = page_need_spider
        self.log.info("获得 {} 页的数据...".format(self.page_need_spider if self.page_need_spider != 0 else "全量"))
        self.beanstalk = PyBeanstalk(OfflineBeanstalkConf.HOST, OfflineBeanstalkConf.PORT)
        self.__set_headers()
        # Filled by __get_first_token() before any listing request is made.
        self.token_key = None

    def __set_headers(self):
        """Install browser-like default headers on the shared proxy session."""
        headers = {
            "Host": self.__HOST,
            "Referer": "http://www.gdcourts.gov.cn/web/search?action=gotoajxxcx&ajlx=zx&flag=first",
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "image/webp,image/apng,image/*,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        }
        self._proxy_requests.set_header(headers)

    def __get_page_data(self, page_count=1):
        """Fetch one listing page and enqueue every document found on it.

        :param page_count: 1-based page number to request.
        :returns: total number of pages to crawl, or -1 when the token
                  exchange or the listing request failed.
        """
        cs_token = self.__get_token()
        if cs_token is None:
            # Previously str(None) == "None" was silently posted; treat a
            # failed token exchange like a failed listing request instead.
            return -1

        # Flat dict of scalars: a shallow copy is sufficient (was deepcopy).
        search_form_data = self.__SEARCH_FORM_DATA.copy()
        search_form_data["csToken"] = str(cs_token)
        search_form_data["pageNum"] = page_count
        search_form_data["token_key"] = self.token_key

        resp = self._proxy_requests.post(self.__LIST_URL,
                                         data=search_form_data,
                                         proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                         timeout=10)
        if resp is None:
            return -1

        pageinfo = resp.json()["pageinfo"]

        for per_info in pageinfo["list"]:
            url_id = per_info.get("WSID")
            detail_url = self.__DETAIL_URL_DEMO.format(url_id)

            item = {"province": "广东",
                    "case_name": per_info.get("SWWSMC"),
                    "case_id": per_info.get("AH"),
                    "court": per_info.get("FYMC"),
                    "case_cause": per_info.get("AY"),
                    "litigants": per_info.get("DSR"),
                    "bulletin_date": per_info.get("CREATETIME"),
                    "doc_id": detail_url,
                    "_site_record_id": detail_url
                    }

            self.__get_detail_page(detail_url, item)

        # 0 means "crawl everything": use the site-reported page total.
        total_page = int(pageinfo["totalPage"]) if self.page_need_spider == 0 else self.page_need_spider
        return total_page

    def __get_first_token(self):
        """Bootstrap: load the start page and extract the initial token key.

        :raises RuntimeError: when the start page cannot be fetched or no
                              token key is present in the response.
        """
        resp = self._proxy_requests.get(self.__START_URL,
                                        proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                        timeout=10)
        if resp is None:
            raise RuntimeError("failed to fetch start page for token key")

        # Raw string: the original '\d' was an invalid escape sequence.
        matches = re.findall(r'.*tokenKey":"(\d+)"', resp.text)
        if not matches:
            raise RuntimeError("no tokenKey found in start page response")
        self.token_key = str(matches[0])

    def __get_detail_page(self, detail_url, item):
        """Download one detail page, extract the document text and enqueue it.

        :param detail_url: absolute URL of the detail page.
        :param item: partially-filled record dict; mutated in place with
                     the extracted ``doc_content`` before being enqueued.
        """
        resp = self._proxy_requests.get(detail_url,
                                        proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                        timeout=10)
        if resp is None:
            # Best effort: skip this document rather than abort the whole page.
            self.log.warn("详情页请求失败: {}".format(detail_url))
            return

        html_resp = html.fromstring(resp.text)
        item["doc_content"] = "".join(html_resp.xpath("//div[@class='main']//span/text()"))

        self.beanstalk.put(OfflineBeanstalkConf.OFFLINE_EXTRACT_INFO_TUBE,
                           thriftobj2bytes(packet(topic_id=TOPICS['judgement_wenshu'], url=detail_url, data=item))
                           )

    def __get_token(self):
        """Exchange the stored token key for a fresh csToken.

        :returns: the csToken value, or None when the exchange failed.
        """
        token_resp = self._proxy_requests.post(self.__TOKEN_URL,
                                               data={"tokenKey": self.token_key},
                                               proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                               timeout=10)  # timeout added for consistency with other requests
        if token_resp is None:
            return None
        return token_resp.json().get('tokenVal')

    def start(self, *args, **kwargs):
        """Entry point invoked by TaskBase: crawl page 1, then pages 2..N."""
        self.log.info("开始采集程序...")

        self.log.info("开启请求首页，获得初始token key...")
        self.__get_first_token()
        self.log.info("初始token key获取完成...")

        # Page 1 also tells us how many pages there are in total.
        total_page = self.__get_page_data(page_count=1)
        if total_page < 0:
            self.log.warn("获取页码数信息异常，不进行遍历: total_page = {}".format(total_page))
            return

        for page in xrange(2, total_page + 1):
            try:
                self.log.info("当前采集页面: page = {}".format(page))
                result = self.__get_page_data(page_count=page)
                if result < 0:
                    self.log.warn("获取页码数信息异常，不进行遍历: total_page = {}".format(result))
                    continue
                self.log.info("当前页面采集完成: page = {}".format(page))

            except Exception as e:
                # One failing page must not kill the whole run.
                self.log.error("当前页面采集失败: page = {}".format(page))
                self.log.exception(e)

        self.log.info("成功退出采集程序...")


@click.command()
@click.option('--page_need_spider',
              # 0 means crawl every available page
              default=15,
              type=int,
              help='采集截止页数')
def main(page_need_spider):
    """CLI entry point: build the crawler task and run it once."""
    try:
        task = GdcourtsWenshu(page_need_spider, logger)
        task()
    except Exception as exc:
        logger.error("采集异常退出: ")
        logger.exception(exc)


if __name__ == '__main__':
    main()
