#!/usr/bin/env python
# encoding: utf-8
"""
@author: youfeng
@email: youfeng243@163.com
@license: Apache License
@file: task_base_worker.py
@time: 2016/12/7 16:17
"""
import ast
import os
import random
import time
from StringIO import StringIO

import requests

from common import util
from common.captcha_local import CaptchaLocal
from common.mongo import MongDb
from common.proxy_local import ProxyLocal
from common.queue_mq_thread import MqQueueThread
from common.util import get_pid_log_name
from config.conf import mongo_db_target, mongo_db_source, remote_proxy_conf, captcha_server_conf
from logger import Gsxtlogger


class TaskBaseWorker(object):
    """Base class for crawl-task workers.

    Bundles the infrastructure every worker shares: source/target MongoDB
    handles, a local proxy pool (optionally backed by a remote proxy
    service), several captcha-recognition back ends, and a message-queue
    writer thread.  Subclasses override get_captcha_url / query_company /
    query_task with site-specific logic.
    """

    # Crawl-state flags stored on source-table records:
    # -2 company name too short, -1 company name malformed,
    #  0 crawl failed, 1 crawl finished, 2 keyword searched but nothing found.
    CRAWL_SHORT_NAME = -2
    CRAWL_INVALID_NAME = -1
    CRAWL_UN_FINISH = 0
    CRAWL_FINISH = 1
    CRAWL_NOTHING_FIND = 2

    # A finished/empty crawl result expires after 7 days and is re-crawled.
    EXPIRATION_TIME = 3600 * 24 * 7

    SUCCESS = "SUCCESS"
    FAIL = "FAIL"

    # Desktop browser User-Agent pool; get_new_session() picks one at random.
    # BUG FIX: a comma was missing after the first entry, which silently
    # concatenated the first two User-Agent strings into one invalid value.
    USER_AGENT_LIST = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 "
        "Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 "
        "Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]

    # Static proxy list, relative to the project root.
    proxies_path = '/config/proxies_200.txt'

    def __init__(self, **kwargs):
        """Build the worker from keyword configuration.

        Recognised kwargs: host, logfile, province, target_table,
        source_table, crawl_flag (default 'source_web'), beanstalk_conf
        (a Python-literal string, e.g. a dict repr; default 'None'),
        is_mq_open (default True), search_table.
        """
        # Project root: parent of the directory holding this file.
        self.base_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

        self.host = kwargs.get('host', '')
        logfile = kwargs.get('logfile', '')
        self.province = kwargs.get('province', '')
        self.target_table = kwargs.get('target_table', '')
        self.source_table = kwargs.get('source_table', '')
        self.crawl_flag = kwargs.get('crawl_flag', 'source_web')
        # literal_eval instead of eval(): the config string is expected to be
        # a plain Python literal, and literal_eval cannot run arbitrary code.
        self.beanstalk_conf = ast.literal_eval(kwargs.get('beanstalk_conf', 'None'))
        self.is_mq_open = kwargs.get('is_mq_open', True)
        self.search_table = kwargs.get('search_table', None)
        self.log = Gsxtlogger(get_pid_log_name(logfile)).get_logger()

        self.proxy_local = ProxyLocal(proxies_file=self.base_path + self.proxies_path, log=self.log)
        self.captchaLocal = CaptchaLocal(log=self.log)
        self.target_db = MongDb(mongo_db_target['host'], mongo_db_target['port'], mongo_db_target['db'],
                                mongo_db_target['username'],
                                mongo_db_target['password'], log=self.log)

        self.source_db = MongDb(mongo_db_source['host'], mongo_db_source['port'], mongo_db_source['db'],
                                mongo_db_source['username'],
                                mongo_db_source['password'], log=self.log)

        # Start the message-queue writer thread.
        self.mq_queue_thread = MqQueueThread(
            server_conf=self.beanstalk_conf, log=self.log, is_open=self.is_mq_open)
        self.mq_queue_thread.start()

    def __del__(self):
        """Best-effort cleanup: release DB handles, stop the MQ thread.

        NOTE(review): __del__ is not guaranteed to run at interpreter
        shutdown; an explicit close() would be more reliable — left as-is
        to preserve the existing contract.
        """
        del self.source_db
        del self.target_db
        self.mq_queue_thread.close()
        self.mq_queue_thread.join(10)
        del self.mq_queue_thread

    def set_crawl_flag(self, item, crawl_flag, flag):
        """Persist a crawl-state flag (see class constants) on a source record.

        Writes both the flag field and a companion '<crawl_flag>_time'
        timestamp, then saves the record back to the source table.
        """
        cur_time = util.get_now_time()
        time_key = crawl_flag + '_time'

        # Stamp the record with the new state and the time it was set.
        item[time_key] = cur_time
        item[crawl_flag] = flag

        self.source_db.save(self.source_table, item)

    def check_crawl_flag(self, item, crawl_flag):
        """Return True when `item` still needs crawling, False otherwise."""

        # No crawl record at all -> needs crawling.
        if crawl_flag not in item:
            return True

        # Flag present but no timestamp -> never actually crawled.
        time_key = crawl_flag + '_time'
        pre_time = item.get(time_key, None)
        if pre_time is None:
            return True

        # Older than EXPIRATION_TIME (7 days) -> re-crawl regardless of state.
        cur_time = util.get_now_time()
        if util.sub_time(cur_time, pre_time) >= self.EXPIRATION_TIME:
            self.log.info('cur_time = {cur} pre_time = {pre} _id = {_id}'.format(
                cur=cur_time, pre=pre_time, _id=item.get('_id', '')))
            return True

        flag = item.get(crawl_flag)
        # Finished within the expiration window -> skip.
        if flag == self.CRAWL_FINISH:
            return False

        # Searched with no results within the window -> skip as well.
        if flag == self.CRAWL_NOTHING_FIND:
            return False

        return True

    def get_captcha_url(self, session):
        """Return the captcha image URL for this site (subclass hook)."""
        pass

    def query_company(self, company_name):
        """Search the site for a company by name (subclass hook)."""
        pass

    def query_task(self, item):
        """Run one crawl task for a source record (subclass hook)."""
        pass

    def task_request(self, requester, url, proxy_type=2, retry=3, **kwargs):
        """Issue an HTTP request through rotating proxies, with retries.

        `requester` is a callable such as session.get / session.post; extra
        kwargs are forwarded to it (timeout defaults to 20s).  Tries `retry`
        times via get_proxy (picking a fresh proxy after each failure), then
        one final time through the local static pool.  Returns the response
        on HTTP 200 whose body passes util.check_html; None on an invalid
        page or after all attempts fail.
        """
        kwargs['proxies'] = self.get_proxy(proxy_type=proxy_type)
        kwargs['timeout'] = kwargs.get('timeout', 20)
        total_start_time = time.time()
        # Pre-set so the fall-back except clause can always report a
        # duration, even if a failure happens before the timer is (re)set.
        start_time = 0
        for _ in xrange(retry):
            try:
                start_time = time.time()
                result = requester(url=url, **kwargs)
                end_time = time.time()
                if result.status_code == 200:
                    # A 200 page that fails the HTML sanity check means the
                    # account/session is burnt -- give up immediately.
                    if not util.check_html(result.text):
                        self.log.error('无效用户: url = {url}'.format(
                            url=url))
                        return None
                    return result
                self.log.warn('代理状态码异常: status_code = {code} url = {url} used_time = {used}s'.format(
                    code=result.status_code, url=url, used=end_time - start_time))
            except Exception as e:
                end_time = time.time()
                # Rotate to a fresh proxy before the next attempt.
                kwargs['proxies'] = self.get_proxy(proxy_type=proxy_type)
                self.log.warn(
                    '代理访问异常: url = {url} used_time = {used}s msg = {msg}'.format(
                        url=url, used=end_time - start_time, msg=str(e)))

        # Last resort: a single attempt through the local static proxy pool.
        try:
            kwargs['proxies'] = self.get_proxy(proxy_type=2)
            start_time = time.time()
            result = requester(url=url, **kwargs)
            end_time = time.time()
            if result.status_code == 200:
                if not util.check_html(result.text):
                    self.log.error('无效用户: url = {url}'.format(
                        url=url))
                    return None
                return result
            self.log.warn('静态代理状态码异常status_code = {code} url = {url} used_time = {used}s'.format(
                code=result.status_code, url=url, used=end_time - start_time))
        except Exception as e:
            end_time = time.time()
            self.log.warn('静态代理访问异常: url = {url} used_time = {used}s msg = {msg}'.format(
                url=url, used=end_time - start_time, msg=str(e)))

        total_end_time = time.time()
        self.log.error('访问失败: url = {url} total time = {total}s'.format(
            url=url, total=total_end_time - total_start_time))
        return None

    def get_new_session(self, host=None, proxy_type=2):
        """Create a requests session with a proxy and a random User-Agent."""
        session = requests.session()
        if host is None:
            host = self.host

        session.proxies = self.get_proxy(host=host, proxy_type=proxy_type)
        session.headers['User-Agent'] = random.choice(self.USER_AGENT_LIST)
        return session

    def get_proxy(self, host=None, proxy_type=2, ssl=False):
        """Return a requests-style proxies dict, e.g. {'http': 'http://ip:port'}.

        proxy_type 1 queries the remote proxy service first and falls back to
        the local pool on any failure; proxy_type 2 (default) uses the local
        pool directly.  Any other proxy_type returns None.  `ssl` selects the
        'https' key instead of 'http'.
        """
        ssl_type = 'http' if not ssl else 'https'
        proxies = None
        if host is None:
            host = self.host
        try:
            if proxy_type == 1:
                r = requests.get(
                    'http://{host}:{port}/proxy/{h}'.format(
                        h=host, host=remote_proxy_conf['host'], port=remote_proxy_conf['port']),
                    timeout=3)
                if r is None or r.status_code != 200:
                    self.log.error('访问远程代理失败了..')
                    proxies = {ssl_type: self.proxy_local.get_local_proxy()['http']}
                else:
                    proxies = {ssl_type: 'http://{host}'.format(host=r.text)}
            elif proxy_type == 2:
                proxies = {ssl_type: self.proxy_local.get_local_proxy()['http']}
        except Exception as e:
            # Any remote/pool failure degrades to the local static pool.
            proxies = {ssl_type: self.proxy_local.get_local_proxy()['http']}
            self.log.exception(e)
        return proxies

    def get_captcha(self, session, cap_type=1, proxy_type=2):
        """Fetch and solve a captcha; returns (result, image_bytes).

        cap_type 1 = haizhi captcha service, 2 = remote HTTP recognizer,
        3 = local recognizer; any other value yields (None, None).
        On failure result is None.
        """
        if cap_type == 1:
            return self.__get_captcha_haizhi(session, proxy_type=proxy_type)
        elif cap_type == 2:
            return self.__get_captcha_other(session, proxy_type=proxy_type)
        elif cap_type == 3:
            return self.__get_captcha_local(session, proxy_type=proxy_type)
        return None, None

    def __get_captcha_local(self, session, proxy_type=2):
        """Solve the captcha with the in-process recognizer (up to 5 tries)."""
        for _ in xrange(5):
            captcha_url = self.get_captcha_url(session)
            r = self.task_request(session.get, captcha_url, proxy_type=proxy_type)
            if r is None:
                continue
            try:
                # Recover raw bytes from requests' default latin-1 decode to
                # look for the site's "访问验证" anti-bot interstitial page.
                text = r.text.encode('ISO-8859-1')
                if text.find('访问验证') != -1:
                    self.log.warn('需要验证码校验, 休眠5S')
                    time.sleep(5)
                    continue
            except Exception:
                # Encoding probe is best-effort only; fall through and try
                # to recognise whatever content we got.
                pass

            result = self.captchaLocal.get_captcha_result(self.province, r.content)
            if result is None:
                continue

            return result, None

        return None, None

    def __get_captcha_other(self, session, proxy_type=2):
        """Solve the captcha via the external HTTP recognizer (3 tries)."""
        for _ in xrange(3):
            captcha_url = self.get_captcha_url(session)
            r = self.task_request(session.get, captcha_url, proxy_type=proxy_type)
            if r is None:
                continue

            post_file = {"cap_image": r.content}
            data = dict(province=self.province)
            try:
                result = requests.post('http://182.61.40.11:8080', files=post_file, data=data, timeout=20)
                if result is None or result.status_code != 200:
                    continue
                return result.content, None
            except Exception as e:
                self.log.exception(e)

        return None, None

    def __get_captcha_haizhi(self, session, proxy_type=2):
        """Solve the captcha via the haizhi service; reports failures back.

        Returns (recognized_text_or_None, last_image_bytes).
        """
        captcha_server_url = "http://{host}:{port}/get_captcha".format(
            host=captcha_server_conf['host'], port=captcha_server_conf['port'])

        captcha_result = None
        cap_content = ''
        for _ in xrange(3):
            try:
                captcha_url = self.get_captcha_url(session)
                r = self.task_request(session.get, captcha_url, proxy_type=proxy_type)
                if r is None:
                    continue
                cap_content = r.content
                post_image = {'captcha': StringIO(cap_content)}
                post_data = {
                    'province': self.province
                }

                result = requests.post(captcha_server_url, data=post_data, files=post_image, timeout=20)
                if result is None or result.status_code != 200:
                    continue

                result_json = util.json_loads(result.content)
                if result_json is None:
                    continue

                status = int(result_json['status'])
                # BUG FIX: was `status is not 0` -- identity comparison on an
                # int; use value equality to test recognition failure.
                if status != 0:
                    self.captcha_report_failed(status, cap_content)
                    continue
                captcha_result = str(result_json['result'])
                break
            except Exception as e:
                self.log.exception(e)
        return captcha_result, cap_content

    def __captcha_report(self, status, result, image_data):
        """POST a recognition outcome ('ok'/'failed'/'error') to the service."""
        report_url = "http://{host}:{port}/report".format(
            host=captcha_server_conf['host'], port=captcha_server_conf['port'])
        post_data = {
            'status': status,
            'result': result,
            'province': self.province
        }
        post_image = {'captcha': StringIO(image_data)}
        requests.post(report_url, data=post_data, files=post_image, timeout=20)

    def captcha_report_failed(self, result, image_data):
        """Report a failed recognition to the captcha service."""
        self.__captcha_report('failed', result, image_data)

    def captcha_report_succeed(self, result, image_data):
        """Report a confirmed-correct recognition to the captcha service."""
        self.__captcha_report('ok', result, image_data)

    def captcha_report_error(self, result, image_data):
        """Report a recognition the target site rejected."""
        self.__captcha_report('error', result, image_data)
