#!/usr/bin/env python
# encoding: utf-8
"""
@author: youfeng
@email: youfeng243@163.com
@license: Apache Licence
@file: crawler_handler.py
@time: 2017/12/20 10:42
"""

import json
import random
import time

import requests
from fake_useragent import UserAgent
from pyquery import PyQuery

from common.mongo import MongDb
from config.app_conf import JS_DECODE_URL, PROVINCE_REGISTER_CODE, CRAWL_DETAIL_FAIL, CRAWL_DETAIL_SUCCESS, \
    CRAWL_DETAIL_PART_SUCCESS
from config.beanstalk_conf import BEANSTALK_CONF
from config.mongo_conf import MONGO_DB_SOURCE, MONGO_DB_TARGET
from config.proxy_conf import DYNAMIC_PROXY_CONF, STATIC_PROXY_URL
from ext.beanstalk_handler import BeanstalkHandler
from ext.proxy_handler import ProxyHandler
from model.http_error import HttpError
from model.mq_model import MqModel


class CrawlerHandler(object):
    """Base handler for the business-registry (gsxt) crawler.

    Responsibilities:
      * proxy rotation and per-host session management via ProxyHandler
      * HTTP requests with retry plus the JS-challenge cookie workaround
        (pages that answer 521 with an obfuscated cookie-setting script)
      * reading seeds from the source MongoDB and querying the target
        (web page) MongoDB
      * pushing seed/list-page/detail-page results onto beanstalk queues
    """

    # Static fallback User-Agent pool, used when fake_useragent cannot be
    # initialized (it downloads its UA database over the network).
    # BUGFIX: the original list was missing a comma after the first entry,
    # which silently concatenated the first two strings into one bogus UA.
    USER_AGENT_LIST = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 "
        "Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 "
        "Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]

    # Maximum number of retries when fetching a detail page
    MAX_TRY_TIMES = 3

    # Maximum backlog allowed on the store message queue before blocking
    MAX_STORE_BLOCK_NUM = 5000

    # Crawl status flags: 0 on failure, 1 on success
    FAIL = 0
    SUCCESS = 1

    def __init__(self, store_tube, log, is_init_source_db=True, is_init_target_db=True):
        """
        :param store_tube: beanstalk tube name results are pushed to
        :param log: shared logger instance
        :param is_init_source_db: open the seed (source) MongoDB handle
        :param is_init_target_db: open the web-page (target) MongoDB handle
        """
        self.log = log

        try:
            self.log.info("准备初始化user_agent...")
            self.ua = UserAgent()
            self.log.info("初始化user_agent完成...")
        except Exception:
            # fake_useragent fetches its data over the network and can fail;
            # fall back to the static USER_AGENT_LIST above.
            self.ua = None

        self.log.info("初始化抓取对象...")

        # Proxy manager (dynamic pool + static list)
        self.__proxy_handler = ProxyHandler(dynamic_proxy_conf=DYNAMIC_PROXY_CONF,
                                            static_proxy_url=STATIC_PROXY_URL,
                                            log=log)

        # Seed (search-list) database handle, optional
        if is_init_source_db:
            self.__source_db = MongDb(MONGO_DB_SOURCE['host'], MONGO_DB_SOURCE['port'], MONGO_DB_SOURCE['db'],
                                      MONGO_DB_SOURCE['username'],
                                      MONGO_DB_SOURCE['password'], log=log)
            self.log.info("工商种子库句柄初始化完成...")
        else:
            self.__source_db = None
            self.log.info("工商种子库未打开，不进行初始化...")

        # Crawl-result (web-page) database handle, optional
        if is_init_target_db:
            self.__target_db = MongDb(MONGO_DB_TARGET['host'], MONGO_DB_TARGET['port'], MONGO_DB_TARGET['db'],
                                      MONGO_DB_TARGET['username'],
                                      MONGO_DB_TARGET['password'], log=log)
            self.log.info("工商网页库句柄初始化完成...")
        else:
            self.__target_db = None
            self.log.info("工商网页库未打开，不进行初始化...")

        # Store queue tube name, fixed for this handler's lifetime
        self.__store_tube = store_tube

        # Generic message queue
        self.__beanstalk_handler = BeanstalkHandler(BEANSTALK_CONF, log=log)
        self.__beanstalk_handler.start()

        # Store message queue with a bounded backlog
        self.__beanstalk_store_handler = BeanstalkHandler(BEANSTALK_CONF,
                                                          max_block=self.MAX_STORE_BLOCK_NUM,
                                                          log=log)
        self.__beanstalk_store_handler.start()

        self.log.info("抓取对象初始化完成...")

    @staticmethod
    def compare_company(origin_company, company):
        """Compare two company names, treating half-width and full-width
        parentheses as interchangeable.

        :return: 0 when the names match, -1 otherwise
        """
        replace_company1 = company.replace('(', '（').replace(')', '）')
        replace_company2 = company.replace('（', '(').replace('）', ')')
        if replace_company1 == origin_company or replace_company2 == origin_company:
            return 0

        return -1

    def get_user_agent(self):
        """Return a User-Agent string: from fake_useragent when available,
        otherwise a random pick from the static fallback list."""
        if self.ua is None:
            return random.choice(self.USER_AGENT_LIST)

        return str(self.ua.random)

    def get_session(self, host):
        """Create a requests.Session with a random UA and a random proxy
        selected for *host*."""
        session = requests.Session()
        session.headers['User-Agent'] = self.get_user_agent()
        session.proxies = self.__proxy_handler.get_random_proxy(host)
        return session

    def switch_proxy(self, session, host):
        """Swap the session's proxy for a fresh one from the pool."""
        session.proxies = self.__proxy_handler.get_random_proxy(host)

    def task_request(self, session, host, func, url, retry=3, **kwargs):
        """Issue *func(url, ...)* with up to *retry* attempts, switching the
        proxy after each failed attempt.

        Status codes 200 and 521 are returned to the caller (521 carries the
        JS challenge); 403 and any other status trigger a proxy switch and a
        retry.

        :return: the response on success, None after all retries failed
        """
        kwargs['timeout'] = kwargs.get('timeout', 30)
        total_start_time = time.time()

        for _ in xrange(retry):
            start_time = time.time()
            reason = ''
            try:
                result = func(url=url, **kwargs)
                if result.status_code in (200, 521):
                    return result
                # 403 switches the proxy silently; other statuses are logged
                if result.status_code != 403:
                    reason = 'status_code = {}'.format(result.status_code)
            except Exception as e:
                self.log.exception(e)

            end_time = time.time()

            # Log the failing proxy before rotating it
            self.log.warn('访问异常: proxy = {}  url = {} {} cost_time = {}s'.format(
                session.proxies,
                url,
                reason,
                end_time - start_time))

            self.switch_proxy(session, host)

        total_end_time = time.time()
        self.log.error('访问失败: url = {} kwargs = {} total time = {}s'.format(
            url, json.dumps(kwargs, ensure_ascii=False), total_end_time - total_start_time))
        return None

    def get_iterator_list(self, table_name, source_select_param, field=None):
        """Return an iterator over seed/parameter documents from the source
        DB, or None when the source DB is not initialized."""
        if self.__source_db is None:
            return None

        return self.__source_db.traverse_batch_field(table_name, source_select_param, field=field)

    def create_source_index(self, table_name, crawl_flag):
        """Create an ascending index on *crawl_flag* in the source table
        (no-op when the source DB is not initialized)."""
        if self.__source_db is not None:
            self.__source_db.create_index(table_name, [(crawl_flag, MongDb.ASCENDING)])

    @staticmethod
    def __delete_blank_text(text):
        """Keep only characters with code points 1-127, stripping padding
        and multibyte characters before posting the JS payload to the
        decoder service."""
        return ''.join(ch for ch in text if 0 < ord(ch) < 128)

    def get_js_cookie(self, text, host):
        """Crack the anti-crawl JS challenge (status 521 pages).

        Rewrites the obfuscated script so the decoded payload is captured
        instead of eval'ed, posts it to the JS_DECODE_URL service, and
        returns the resulting "key=value" cookie (or None on failure).
        """
        text = text.strip()

        # Strip script tags and replace the eval() with an assignment so
        # the decoder service can return the decoded result.
        text1 = text.replace('</script>', '')
        text2 = text1.replace('script', '')
        text3 = text2.replace('<>', '')
        text4 = text3.replace(r'eval(y.replace(/\b\w+\b/g, function(y){return x[f(y,z)-1]}));',
                              r'result=y.replace(/\b\w+\b/g, function(y){return x[f(y,z)-1]});')

        text5 = self.__delete_blank_text(text4)

        try:
            resp = requests.post(JS_DECODE_URL, data={'code': text5, 'host': '{}/'.format(host)}, timeout=10)
            if resp.status_code != 200:
                return None
            json_data = json.loads(resp.text)
            if json_data['status'] == 'fail':
                return None

            # Only the first "key=value" pair of the Set-Cookie result is used
            cookie = json_data['result'].split(';')[0]
            return cookie
        except Exception as e:
            self.log.info("请求破解失败:")
            self.log.exception(e)
        return None

    def task_request_without_proxy(self, session, func, url, **kwargs):
        """Single-shot request without proxy rotation.

        Returns the response for status 200/521/400 (521 carries the JS
        challenge, 400 is passed through for the caller to inspect);
        returns None for 403, other statuses, or exceptions.
        """
        kwargs['timeout'] = kwargs.get('timeout', 5)

        start_time = time.time()
        try:
            result = func(url=url, **kwargs)
            if result.status_code in (200, 521, 400):
                return result
        except Exception as e:
            self.log.exception(e)

        end_time = time.time()

        # Log the proxy in use when the attempt failed
        self.log.warn('访问异常: proxy = {}  url = {} cost_time = {}s'.format(
            session.proxies,
            url,
            end_time - start_time))

        return None

    def request_with_cookie(self, session, host, func, url, **kwargs):
        """Request *url*; on a 521 JS challenge, crack the cookie, install
        it on the session, and retry once.

        :return: the response (200 directly, or any non-521 status from the
                 first attempt), or None when the challenge cannot be solved
        """
        resp = self.task_request_without_proxy(session, func, url=url, **kwargs)
        if resp is None:
            return None

        if resp.status_code == 200:
            return resp

        # Anything other than the 521 challenge is handed back unchanged
        if resp.status_code != 521:
            return resp

        # Solve the JS challenge to obtain the access cookie
        cookie = self.get_js_cookie(resp.text, host)
        if cookie is None:
            return None

        key, value = cookie.split("=")
        session.cookies[key] = value
        session.headers['Referer'] = url

        resp = self.task_request_without_proxy(session, func, url=url, **kwargs)
        if resp is None:
            return None

        # Only a 200 after installing the cookie counts as success
        if resp.status_code == 200:
            return resp

        return None

    def request_retry(self, session, host, func, first_url, **kwargs):
        """Fetch a detail page with up to MAX_TRY_TIMES attempts, rotating
        the proxy on empty/failed responses.

        :return: (response, HttpError.NO_ERROR) on success,
                 (response, HttpError.NOT_FOUND) when the page titles "404",
                 (None, HttpError.NO_ERROR) after exhausting retries
        """
        try_times = 0
        error = HttpError.NO_ERROR
        while try_times < self.MAX_TRY_TIMES:
            try_times += 1
            resp = self.request_with_cookie(session, host, func, first_url, **kwargs)
            if resp is None:
                self.log.info("当前resp = None重试抓取: try_times = {} max_times = {} url = {}".format(
                    try_times, self.MAX_TRY_TIMES, first_url))
                self.switch_proxy(session, host)
                continue

            # An empty body is treated as a failed fetch and retried
            if resp.text == '':
                self.log.info("当前text为空字符串重试抓取: try_times = {} max_times = {} url = {}".format(
                    try_times, self.MAX_TRY_TIMES, first_url))
                self.switch_proxy(session, host)
                continue

            try:
                # Pages that title themselves "404" are soft not-found pages
                if PyQuery(resp.text, parser='html').find('title').text() == '404':
                    return resp, HttpError.NOT_FOUND
            except Exception as e:
                self.log.error("解析404失败:")
                self.log.exception(e)
                self.log.info("当前解析404失败重试抓取: try_times = {} max_times = {} url = {}".format(
                    try_times, self.MAX_TRY_TIMES, first_url))
                continue

            return resp, error

        self.log.warn("当前重试超次数: try_times = {} max_times = {} url = {}".format(
            try_times, self.MAX_TRY_TIMES, first_url))
        return None, error

    @staticmethod
    def cal_province(register_code):
        """Derive the province short-name from a registration code.

        15/13-digit codes carry the province in the first two digits,
        18-digit unified codes in digits 3-4. Falls back to 'gsxt' for
        non-string or unrecognized codes.
        """
        # Python 2 string check (str or unicode)
        if not isinstance(register_code, basestring):
            return 'gsxt'

        if len(register_code) == 15 and register_code[0:2] in PROVINCE_REGISTER_CODE:
            return PROVINCE_REGISTER_CODE[register_code[0:2]]

        if len(register_code) == 18 and register_code[2:4] in PROVINCE_REGISTER_CODE:
            return PROVINCE_REGISTER_CODE[register_code[2:4]]

        if len(register_code) == 13 and register_code[0:2] in PROVINCE_REGISTER_CODE:
            return PROVINCE_REGISTER_CODE[register_code[0:2]]

        return 'gsxt'

    @staticmethod
    def is_company_invalid(company):
        """Return True when the company name contains characters that would
        break downstream queries ('%' or '#')."""
        if '%' in company or '#' in company:
            return True
        return False

    @staticmethod
    def get_cookie_dict(session):
        """Flatten the session's cookie jar into a plain dict."""
        cookie_dict = {}
        for key, value in session.cookies.iteritems():
            cookie_dict[key] = value

        return cookie_dict

    def save_seed_info(self, table_name, seed_model):
        """Wrap one seed in a MqModel and push it onto the store queue."""
        mq_model = MqModel()
        mq_model.append_seed(table_name, seed_model)
        self.__beanstalk_store_handler.push_msg(self.__store_tube, mq_model.get_store_model())

    def save_seed_batch(self, table_name, seed_list):
        """Push every seed in *seed_list*; returns the number pushed."""
        for seed_model in seed_list:
            self.save_seed_info(table_name, seed_model)

        return len(seed_list)

    def save_detail_page_info(self, table_name, web_page_info):
        """Wrap one detail-page record in a MqModel and push it onto the
        store queue."""
        mq_model = MqModel()
        mq_model.append_detail_page(table_name, web_page_info)
        self.__beanstalk_store_handler.push_msg(self.__store_tube, mq_model.get_store_model())

    def save_detail_page_batch(self, table_name, web_page_list):
        """Push every detail-page record; returns the number pushed."""
        for web_page_model in web_page_list:
            self.save_detail_page_info(table_name, web_page_model)

        return len(web_page_list)

    def save_list_page_info(self, table_name, web_page_info):
        """Wrap one list-page record in a MqModel and push it onto the
        store queue."""
        mq_model = MqModel()
        mq_model.append_list_page(table_name, web_page_info)
        self.__beanstalk_store_handler.push_msg(self.__store_tube, mq_model.get_store_model())

    def save_list_page_batch(self, table_name, web_page_list):
        """Push every list-page record; returns the number pushed."""
        for web_page_model in web_page_list:
            self.save_list_page_info(table_name, web_page_model)

        return len(web_page_list)

    def find_web_page_one(self, table_name, filter_param):
        """Look up one document in the target (web page) DB; returns None
        when the target DB is not initialized."""
        if self.__target_db is None:
            self.log.error("当前网页库没有初始化, 不进行存储: table_name = {}".format(table_name))
            return None

        return self.__target_db.find_one(table_name, filter_param)

    def send_store_beanstalk(self, store_model):
        """Push a dict (single message) or list of dicts onto the store
        queue; any other type is silently ignored."""
        if isinstance(store_model, dict):
            self.__beanstalk_store_handler.push_msg(self.__store_tube, store_model)
            return

        if isinstance(store_model, list):
            for model in store_model:
                self.__beanstalk_store_handler.push_msg(self.__store_tube, model)

    def send_beanstalk(self, tube, msg_model):
        """Push a dict or list of dicts onto an arbitrary tube via the
        generic queue handler; any other type is silently ignored."""
        if isinstance(msg_model, dict):
            self.__beanstalk_handler.push_msg(tube, msg_model)
            return

        if isinstance(msg_model, list):
            for model in msg_model:
                self.__beanstalk_handler.push_msg(tube, model)

    @staticmethod
    def get_crawl_status(success_count, full_success_count):
        """Map per-section counts to an overall crawl status: fail when
        nothing succeeded, full success when everything did, otherwise
        partial success."""
        if success_count <= 0:
            return CRAWL_DETAIL_FAIL

        if success_count >= full_success_count:
            return CRAWL_DETAIL_SUCCESS

        return CRAWL_DETAIL_PART_SUCCESS
