#!/usr/bin/env python 
# coding:utf-8
# @Time :10/8/18 14:30

import copy
import json
import random
import time

from lxml import html
import re

import execjs
import six.moves.urllib as urllib

import requests
import httplib

from common.logger import AppLogger
from service.request_service import RequestService
from common.utils import unicode2str

from common.mongo import MongDb
from config.mongo_conf import CLUE_CRAWL

from config.beanstalk_conf import TOPICS
from config.beanstalk_conf import BEANSTALK_CONF
from config.tube_conf import TUBE_INFO


from common.thrift_object_generator import gen_pageparse_info
from common.thrift_serialize import thriftobj2bytes

from common.log_for_pgsql import LogForPgSql
from config.log_report_conf import LOG_CONF

# Pre-compile the two JavaScript helpers the spider depends on:
#  * vl5x.js  -> ctx   : computes the anti-crawler "vl5x" request token
#  * docid.js -> ctx2  : decrypts the document ids returned by searches
with open('../resource/vl5x.js') as vl5x_fp:
    js = vl5x_fp.read()
ctx = execjs.compile(js)

with open('../resource/docid.js') as docid_fp:
    js = docid_fp.read()
ctx2 = execjs.compile(js)


class WenshuSpider(object):
    """Crawler for judgement documents on wenshu.court.gov.cn.

    Searches the site by party (company) name, walks the paginated result
    list, decrypts each encrypted document id via the pre-compiled JS
    contexts (``ctx``/``ctx2``), fetches every detail page, pushes the
    parsed document onto a beanstalk queue, and records each crawled case
    number in MongoDB so the same case is not stored twice.
    """

    logger = AppLogger("WenshuSpider.log").get_logger()

    # commons
    __HOST = "wenshu.court.gov.cn"

    __COMMON_HEADERS = {
        'Host': 'wenshu.court.gov.cn',
        'Origin': 'http://wenshu.court.gov.cn',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
    }

    # urls
    __CODE_URL = "http://wenshu.court.gov.cn/ValiCode/GetCode"
    __LIST_URL = "http://wenshu.court.gov.cn/list/list/"
    __SEARCH_URL = "http://wenshu.court.gov.cn/List/ListContent"
    __DETAIL_URL = "http://wenshu.court.gov.cn/CreateContentJS/CreateContentJS.aspx"

    # configs
    __COMMON_HTTP_TIME_OUT = 20

    # retry times
    __MAX_GET_NUMBER = 5
    __MAX_GET_VJKL5 = 5
    __MAX_GET_SEARCH = 5
    __MAX_GET_DETAIL = 5
    __MAX_PUSH_MSG = 3
    __MAX_CONNNET_LOG_REPORT = 3

    # status values stored in Mongo alongside each crawled case
    __CASE_NUM_CRAWL_STATUS_MAP = {
        "SUCCESSED": 1,
        "FAILED": 0,
    }

    __SEARCH_STATUS = {
        "INVALID_NAME": -1,
        "NO_RESULT": 2,
    }

    __CRAWL_STATUS = {
        "SUCCEED": 1,
    }

    # mongo table (collection) name for crawled-case de-duplication
    __WENSHU_CRAWLED_CASE_TABLE = "wenshu_crawled_case"

    def __init__(self, wenshu_crawled_case_table, beanstalk_handler, wenshu_company_name_table=None, wenshu_company_collection=None):
        """
        :param wenshu_crawled_case_table: Mongo handle used to de-duplicate and
            store crawled case records
        :param beanstalk_handler: message-queue handle that parsed documents
            are pushed to
        :param wenshu_company_name_table: optional Mongo handle for updating a
            company's search/crawl status
        :param wenshu_company_collection: collection name used together with
            *wenshu_company_name_table*
        """
        self.request_service = RequestService(self.logger)
        self.session = requests.Session()
        self.session.proxies = self.request_service.get_dynamic_proxy(self.__HOST)

        self._wenshu_crawled_case_table = wenshu_crawled_case_table
        self._wenshu_company_name_table = wenshu_company_name_table
        self._wenshu_company_name_collection = wenshu_company_collection

        self.beanstalk_handler = beanstalk_handler

        # May be None when the PgSQL log reporter cannot be reached; the
        # reporter is only used when log batching is configured.
        self.__log_report = self.__proxy_log_report()
        self.__log_batch = LOG_CONF["log_batch"]

    # Crawl entry point: fetch every result page for one company.
    def start(self, company_name, page, order, direction):
        """Crawl all search-result pages for *company_name*.

        :param company_name: party name to search for
        :param page: page size, passed through as the "Page" form field
        :param order: sort field (site-defined, e.g. court level)
        :param direction: sort direction ("asc" / "desc")
        """
        self.logger.info("start 开始爬取 {}".format(company_name))
        param = "当事人:%s" % company_name
        total_page = self.grab_search_page(param, page, order, direction)
        if total_page == -1:
            self.logger.warn("start 当前爬取页面存在异常，请校验...")
            return
        if total_page == 0:
            self.logger.info("start {} 当前页面没有搜索到有效数据...".format(company_name))
            self.__update_company_list(company_name, {"search_status": self.__SEARCH_STATUS["NO_RESULT"]})
            return
        self.logger.info("start {} 总共存在 {} 页数据".format(company_name, total_page))
        if total_page > 1:
            for i in range(2, total_page + 1):
                result = self.grab_search_page(param, page, order, direction, i)
                if result == -1:
                    self.logger.info("start 当前爬取页面({})存在异常，请校验...".format(i))
                    continue
                if result == 0:
                    self.logger.info("当前页面({})没有搜索到有效数据...".format(i))
                    continue

        self.__update_company_list(company_name, {"crawl_status": self.__CRAWL_STATUS["SUCCEED"]})

    # Fetch one search-result page and crawl every new document on it.
    def grab_search_page(self, param, offset, order, direction, page_num=1):
        """Request one result page of the search defined by *param*.

        :param param: search expression of the form u"当事人:<company>"
        :param offset: page size ("Page" form field)
        :param order: sort field
        :param direction: sort direction
        :param page_num: 1-based result-page index
        :returns: total page count (capped at 10) on success, 0 when the
                  search yields no results, -1 on failure
        """
        company_name = param.split(':')[1]
        self.logger.info("start 当前正在爬取 👉{}👈 的第 ✌️ {} ✌️ 页...".format(company_name, page_num))
        try:
            guid = self.__get_guid()
            number = self.__proxy_get_number(guid)
            vjkl5 = self.__proxy_get_vjkl5(guid, number, param)
            vl5x = self.__get_vl5x(vjkl5)

            form_data = {
                "Param": param,
                "Index": page_num,
                "Page": offset,
                "Order": order,
                "Direction": direction,
                "vl5x": vl5x,
                "number": number,
                "guid": guid
            }

            headers = self.__generate_headers(form_data)

            # Retry loop: each failed attempt rotates the proxy session and
            # regenerates the anti-crawler tokens before trying again.
            for try_count in range(self.__MAX_GET_SEARCH):
                try:
                    if try_count > 0:
                        self.logger.info("grab_search_data 正在进行第 {} 次重试，公司={}， form_data={}".
                                         format(try_count, company_name, json.dumps(form_data, ensure_ascii=False)))

                    response = self.session.post(self.__SEARCH_URL,
                                                 headers=headers,
                                                 data=form_data,
                                                 timeout=self.__COMMON_HTTP_TIME_OUT)

                    # Bad HTTP status: swap proxy and refresh tokens.
                    if response.status_code != 200:
                        self.logger.warning("__request_search_info 爬取 ({}) 发现错误状态码({}), 替换代理重新尝试..."
                                            .format(company_name, response.status_code))
                        self.__update_session()
                        form_data = self.__update_search_data_params(form_data)
                        headers = self.__generate_headers(form_data)
                        self.__log_report_info(task_type=self.__log_report.LIST,
                                               task_status=self.__log_report.RETRY,
                                               company=company_name)
                        continue

                    response.encoding = 'utf-8'
                    response_result = response.text.encode("utf-8")
                    # Strip the escaping the site wraps its JSON payload in,
                    # and normalise full-width quote entities.
                    return_data = response_result.replace('\\', '').replace('"[', '[').replace(']"', ']') \
                        .replace('＆ｌｄｑｕｏ;', '“').replace('＆ｒｄｑｕｏ;', '”')

                    # Captcha challenge: rotate session/tokens and retry.
                    if return_data == '"remind"' or return_data == '"remind key"':
                        self.logger.warn("__request_search_info 爬取({})时出现验证码...正在尝试替换session".format(company_name))
                        self.__update_session()
                        form_data = self.__update_search_data_params(form_data)
                        headers = self.__generate_headers(form_data)
                        self.__log_report_info(task_type=self.__log_report.LIST,
                                               task_status=self.__log_report.RETRY,
                                               company=company_name)

                        # Downgrade to HTTP/1.0 while the site is challenging
                        # us — presumably a captcha-evasion workaround; TODO
                        # confirm this is still needed.
                        httplib.HTTPConnection._http_vsn = 10
                        httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0'
                        continue

                    # Request succeeded: restore HTTP/1.1 and parse payload.
                    httplib.HTTPConnection._http_vsn = 11
                    httplib.HTTPConnection._http_vsn_str = 'HTTP/1.1'
                    json_data = unicode2str(json.loads(return_data))

                    if not json_data or json_data[0].get("Count") == "0":
                        self.logger.info("grab_search_data {} 没有搜索到数据, 跳过".format(company_name))
                        return 0

                    # Element 0 carries metadata (Count, RunEval key script);
                    # elements 1..n are the result rows.
                    runeval = json_data[0]['RunEval']
                    for i in range(1, len(json_data)):
                        case_number = json_data[i]['案号'] if '案号' in json_data[i] else ''
                        id = json_data[i]['文书ID'] if '文书ID' in json_data[i] else ''
                        id = self.__decrypt_id(runeval, id)

                        if self.__is_detail_crawled(case_number):
                            self.logger.info(
                                "grab_search_page {} 的 案号为 {} 的信息已爬取...".format(company_name, case_number))
                            continue

                        self.grab_detail_page(id, company_name, case_number)

                    # Total page count, 20 rows per page, capped at 10 pages.
                    # BUGFIX: ceil-divide instead of "count // 20 + 1", which
                    # produced one extra empty page whenever the count was an
                    # exact multiple of 20.
                    page_count = int(json_data[0].get("Count"))
                    total_page = (page_count + 19) // 20
                    total_page = total_page if total_page < 10 else 10
                    return total_page

                except BaseException as e:
                    self.logger.error("__request_search_info 请求 {} 时发生错误，此时的参数form_data={}"
                                      .format(company_name, json.dumps(form_data, ensure_ascii=False)))
                    self.logger.exception(e)
                    self.__log_report_info(task_type=self.__log_report.LIST,
                                           task_status=self.__log_report.FAILED,
                                           company=company_name)
                    self.__update_session()
                    form_data = self.__update_search_data_params(form_data)
                    headers = self.__generate_headers(form_data)

            # BUGFIX: previously fell through and implicitly returned None
            # after exhausting every retry; callers test for -1, so report
            # the failure explicitly.
            return -1

        except BaseException as e:
            self.logger.error("grab_search_data bottom error happened!")
            self.logger.exception(e)
            return -1

    # Fetch and store one document detail page.
    def grab_detail_page(self, id, company_name, case_number):
        """Download the detail page for document *id*, extract its fields,
        push the parsed record to beanstalk and mark the case as crawled.

        :param id: decrypted document id (DocID)
        :param company_name: company the document was found under
        :param case_number: court case number, used for de-duplication
        """
        self.logger.info("grab_detail_page 开始采集 {} 案号为 {} 的文书详情信息...".format(company_name, case_number))
        url = "?".join([self.__DETAIL_URL, "DocID={}".format(id)])
        response = None
        for try_count in range(self.__MAX_GET_DETAIL):
            if try_count > 0:
                self.logger.warn("grab_detail_page {} 当前正在进行第 {} 次重试...".format(company_name, try_count))
            try:
                response = self.session.get(url, headers=self.__COMMON_HEADERS, timeout=self.__COMMON_HTTP_TIME_OUT)
                if response.status_code != 200:
                    self.logger.info("grab_detail_page 获得错误状态码({}), 重试...".format(response.status_code))
                    self.__update_session()
                    continue
                break

            except BaseException as e:
                self.logger.debug('grab_detail_page 请求链接时发生错误，错误原因是：')
                self.logger.exception(e)

        # BUGFIX: check the status code explicitly. The original relied on
        # requests.Response truthiness ("if not response"), which is True for
        # any status < 400 — so a final 3xx response would slip through to
        # parsing.
        if response is None or response.status_code != 200:
            self.logger.error("grab_detail_page 在请求 {} 详情链接时无法拿到数据，详情链接为 {}，已尝试Max({})次但无法拿到数据".format(company_name, url,
                                                                                                 self.__MAX_GET_DETAIL))
            return

        response.encoding = 'utf-8'
        return_data = response.text.replace('\\', '')

        if "此篇文书不存在!" in return_data:
            self.logger.info("grab_detail_page 文书信息不存在...")
            return

        try:
            court_title = re.findall(r'\"Title\"\:\"(.*?)\"', return_data)[0]
        except BaseException as e:
            self.logger.warning("grab_detail_page 获取不了court_title字段，return_data is >>>>{} error={}".format(return_data, e))
            self.logger.exception(e)
            return
        court_date = re.findall(r'\"PubDate\"\:\"(.*?)\"', return_data)[0]
        court_content = re.findall(r'\"Html\"\:\"(.*?)\"', return_data)[0]
        try:
            content = self.__extract_content(court_content)
        except BaseException as e:
            self.logger.error("不能正确解析内容信息，return_data={}, id={}".format(return_data, id))
            self.logger.exception(e)
            return

        data = {'url': url,
                'doc_id': id,
                "case_name": court_title,
                "bulletin_date": court_date,
                "_site_record_id": id,
                "doc_content": content
                }

        # Serialize to a thrift message and hand off to the queue.
        obj = gen_pageparse_info(url, data, topic_id=TOPICS["judgement_wenshu"])
        obj = thriftobj2bytes(obj)

        self.__proxy_put_beanstalk(obj)

        self.__log_report_info(task_type=self.__log_report.DETAIL,
                               task_status=self.__log_report.SUCCESS,
                               company=company_name)

        detail_data = {
            "detail_url": url,
            "case_number": case_number,
            "company": company_name,
            "crawl_flag": self.__CASE_NUM_CRAWL_STATUS_MAP["SUCCESSED"],
            "_in_time": time.strftime("%Y-%m-%d %H:%M:%S"),
            "crawl_time": time.strftime("%Y-%m-%d %H:%M:%S"),
        }

        self.logger.info("当前采集到 >>> 案号为【{}】的文书信息...".format(case_number))
        self.__save_detail_to_mongo(detail_data)

    def __get_guid(self):
        """Build a pseudo-random GUID in the format the site's own JS uses."""
        def create_guid():
            # Four random hex digits (mirrors the JS ((1+random)*0x10000|0)
            # trick; the [3:] slice drops the leading "0x1").
            return str(hex((int(((1 + random.random()) * 0x10000)) | 0)))[3:]

        return '{}{}-{}-{}{}-{}{}{}' \
            .format(
            create_guid(), create_guid(),
            create_guid(), create_guid(),
            create_guid(), create_guid(),
            create_guid(), create_guid())

    def __proxy_get_number(self, guid):
        """Fetch the validation "number" token, retrying with fresh sessions.

        Returns the token text, or None if every attempt fails (callers'
        outer try/except then converts the eventual failure into -1).
        """
        for i in range(self.__MAX_GET_NUMBER):
            if i > 0:
                self.logger.warning("__proxy_get_number 正在进行第 {} 次重试, 此时的guid为[{}]".format(i, guid))
            resp_status_code, resp = self.__get_number(guid)
            if resp_status_code == -1:
                self.logger.warning("proxy_get_number 请求异常，请重试.")
                self.__update_session()
                continue

            if resp_status_code != 200:
                self.logger.warning("__proxy_get_number 获得错误状态码，此时状态码为({}).".format(resp_status_code))
                self.__update_session()
                continue
            return resp.text

    def __get_number(self, guid):
        """POST the guid to the code endpoint.

        :returns: (status_code, response); status_code is -1 on request error.
        """
        form_data = {'guid': guid}
        headers = copy.deepcopy(self.__COMMON_HEADERS)
        headers.update({'Referer': 'http://wenshu.court.gov.cn/', 'X-Requested-With': 'XMLHttpRequest'})
        response = None
        try:
            response = self.session.post(self.__CODE_URL,
                                         data=form_data,
                                         headers=headers,
                                         timeout=self.__COMMON_HTTP_TIME_OUT)

        except BaseException as why:
            self.logger.warning("__get_number 时发生request请求错误，错误原因是={}".format(why))
            self.logger.exception(why)
            return -1, response

        return response.status_code, response

    def __proxy_get_vjkl5(self, guid, number, param):
        """Fetch the "vjkl5" cookie, retrying with a fresh session and guid.

        Returns the cookie value, or None if every attempt fails.
        """
        for i in range(self.__MAX_GET_VJKL5):
            if i > 0:
                self.logger.warning("__proxy_get_vjkl5 正在进行第 {} 次重试, 此时的guid为[{}], number为[{}], param为[{}]"
                                    .format(i, guid, number, param))
            resp = self.__get_vjkl5(guid, number, param)
            if resp == -1:
                self.logger.warning("__proxy_get_vjkl5 请求 __get_vjkl5 时发生异常，请校验...")
                self.__update_session()
                guid = self.__get_guid()
                continue
            return resp

    def __get_vjkl5(self, guid, number, param):
        """GET the list page and read the "vjkl5" cookie it sets.

        :returns: cookie value on success, -1 on any failure
        """
        params = {
            "sorttype": 1,
            "number": number,
            "guid": guid,
            "conditions": "searchWord+QWJS+++" + urllib.parse.quote(param)
        }

        headers = copy.deepcopy(self.__COMMON_HEADERS)
        headers.update({"Proxy-Connection": "keep-alive", "Upgrade-Insecure-Requests": "1"})

        try:
            response = self.session.get(self.__LIST_URL,
                                        params=params,
                                        headers=headers,
                                        timeout=self.__COMMON_HTTP_TIME_OUT)
        except BaseException as why:
            self.logger.error("__get_vjkl5 时发生request请求错误，错误原因是={}".format(why))
            self.logger.exception(why)
            return -1

        if response.status_code != 200:
            self.logger.warning("__get_vjkl5 发送请求时状态出错，此时的错误状态码是({}).".format(response.status_code))
            return -1

        try:
            vjkl5 = response.cookies["vjkl5"]
        except BaseException as why:
            self.logger.error("__get_vjkl5 解析cookies是发生错误:")
            self.logger.exception(why)
            return -1

        return vjkl5

    def __get_vl5x(self, vjkl5):
        """Derive the "vl5x" form token from the vjkl5 cookie via vl5x.js."""
        vl5x = (ctx.call('GetVl5x', vjkl5))
        self.logger.info("__get_vl5x 获得vjkl5值，此时的值为: {}".format(vjkl5))
        return vl5x

    def __decrypt_id(self, runeval, id):
        """Decrypt an encrypted document id using the RunEval key script.

        The key script is unpacked by docid.js, the 32-char hex key is
        extracted, and the DocID is decrypted with it.
        """
        js = ctx2.call("GetJs", runeval)
        js_objs = js.split(";;")
        js1 = js_objs[0] + ';'
        js2 = re.findall(r"_\[_\]\[_\]\((.*?)\)\(\);", js_objs[1])[0]
        key = ctx2.call("EvalKey", js1, js2)
        key = re.findall(r"\"([0-9a-z]{32})\"", key)[0]
        docid = ctx2.call("DecryptDocID", key, id)
        return docid

    def __update_session(self):
        """Replace the HTTP session and pick a fresh dynamic proxy."""
        self.session = requests.session()
        self.session.proxies = self.request_service.get_dynamic_proxy(self.__HOST)

    def __update_search_data_params(self, data):
        """Regenerate the anti-crawler fields (guid/number/vl5x) in *data*."""
        guid = self.__get_guid()
        number = self.__proxy_get_number(guid)
        param = data.get("Param")
        vjkl5 = self.__proxy_get_vjkl5(guid, number, param)
        vl5x = self.__get_vl5x(vjkl5)
        data['vl5x'] = vl5x
        data['number'] = number
        data['guid'] = guid
        return data

    def __generate_headers(self, data):
        """Build request headers whose Referer matches the search form data."""
        number, guid, param = data.get('number'), data.get("guid"), data.get("Param")
        refer_request_params = urllib.parse.urlencode({"sorttype": 1,
                                                       "number": number,
                                                       "guid": guid,
                                                       "conditions": "searchWord+QWJS+++" + urllib.parse.quote(param)})
        refer_url = "?".join([self.__LIST_URL, refer_request_params])
        headers = copy.deepcopy(self.__COMMON_HEADERS)
        headers.update({"Referer": refer_url})
        return headers

    def __extract_content(self, content_raw):
        """Concatenate the first text node of every element carrying text."""
        content_html = html.fromstring(content_raw)
        content = ''
        for per_content_loc in content_html.xpath('//*[text()]'):
            content += per_content_loc.xpath('./text()')[0]
        return content

    def __is_detail_crawled(self, case_number):
        """Return True if *case_number* (a string) is already stored."""
        exists = self._wenshu_crawled_case_table.find_one(self.__WENSHU_CRAWLED_CASE_TABLE, {"case_number": case_number})
        return True if exists else False

    def __save_detail_to_mongo(self, detail_data):
        """Insert *detail_data* unless its case number was already crawled."""
        case_number = detail_data["case_number"]
        # BUGFIX: __is_detail_crawled expects the bare case-number string and
        # wraps it in {"case_number": ...} itself; the original passed a dict
        # here, producing a query that never matched, so the pre-insert
        # duplicate check was a no-op.
        if self.__is_detail_crawled(case_number):
            self.logger.info("case_number({})已经爬取，不进行存储...".format(case_number))
            return
        self._wenshu_crawled_case_table.insert(self.__WENSHU_CRAWLED_CASE_TABLE, detail_data)

    def __update_company_list(self, company_name, item):
        """Update a company's status row (no-op when no table was supplied)."""
        if self._wenshu_company_name_table:
            item["search_status_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            item["crawl_status_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            self._wenshu_company_name_table.update(self._wenshu_company_name_collection,
                                                   {"_id": company_name}, {"$set": item})

    def __proxy_put_beanstalk(self, body):
        """Best-effort push of *body* to the beanstalk queue (errors logged)."""
        try:
            self.beanstalk_handler.put_msg(body)

        except Exception as e:
            self.logger.warn("__proxy_put_beanstalk 发送数据到消息队列失败，错误原因是:")
            self.logger.exception(e)

    def __proxy_log_report(self):
        """Connect the PgSQL log reporter, retrying; returns None on failure."""
        for i in range(self.__MAX_CONNNET_LOG_REPORT):
            try:
                self.__log_report = LogForPgSql("crawler_log_table")
                return self.__log_report
            except BaseException as e:
                self.logger.warn("__proxy_log_report 连接 LogForPgSql 发生错误...")
                self.logger.exception(e)
        self.logger.error("__proxy_log_report 重试连接 LogForPgSql 最大次数({})，但依然无法获取数组...".format(self.__MAX_CONNNET_LOG_REPORT))
        return None

    def __log_report_info(self, company=None, task_type=None, task_status=None):
        """Record one crawl event in the log reporter when batching is on.

        :raises ValueError: when a required field is missing
        :raises TypeError: when the assembled payload is not a dict
        """
        if self.__log_batch:
            if company is None or task_status is None or task_type is None:
                self.logger.error("__log_report_info miss necessary info.Company={}, "
                                  "task_type={}, task_status={}".format(company, task_type, task_status))
                # BUGFIX: raise a concrete exception instance instead of the
                # bare BaseException class (still caught by existing
                # "except BaseException" handlers).
                raise ValueError("__log_report_info missing necessary info")

            data = self.__load_log_report(company, task_type, task_status)
            if not isinstance(data, dict):
                self.logger.error("__log_report_info data must be dict. Please check it again")
                raise TypeError("__log_report_info data must be dict")
            self.__log_report.record(**data)

    def __load_log_report(self, company, task_type, task_status):
        """Assemble the keyword payload for LogForPgSql.record."""
        datas = {"application_id": LOG_CONF["log_app_id"],
                 "batch_info": self.__log_batch,
                 "task_type": task_type,
                 "task_status": task_status,
                 "task_param": {"company": company,
                                "topic_id": TOPICS["judgement_wenshu"]}}
        return datas


if __name__ == '__main__':
    # Ad-hoc manual test entry point.
    # NOTE(review): WenshuSpider.__init__ requires wenshu_crawled_case_table
    # and beanstalk_handler as positional arguments, so this no-arg call
    # raises TypeError as written — confirm how this entry point is meant to
    # be wired up (Mongo table handle + beanstalk queue handle).
    wenshu_spider = WenshuSpider()
    # wenshu_spider.grab_search_data("当事人: 滴滴", 20, "法院层级", "asc")
    wenshu_spider.start("绍兴市工贸国有资本经营有限公司", 20, "法院层级", "dsc")

