#!/usr/bin/env python
# encoding: utf-8
"""
@author: youfeng
@email: youfeng243@163.com
@license: Apache Licence
@file: gsxt_detail_worker_crawl.py
@time: 2018/1/3 17:46
"""
import random
import time

import gevent.pool
from beanstalkc import SocketError
from gevent import monkey

from cache.record_cache import RecordCache
from common import util
from common.pybeanstalk import PyBeanstalk
from config.app_conf import PROVINCE_LIST, PROVINCE_HOST_DICT, CRAWL_TABLE_CONF
from config.redis_conf import GS_RECORD_CACHE_CONF
from crawler.detail_page_crawler import DetailPageCrawler

# NOTE(review): gevent monkey-patching normally must run BEFORE importing
# modules that create sockets (beanstalkc / redis clients imported above) —
# confirm those clients still receive patched sockets, or move this call to
# the very top of the file, ahead of the other imports.
monkey.patch_all()


# Detail-page crawl worker: consumes company seed messages from a beanstalk
# tube and dispatches each one to a gevent coroutine that crawls the page.
class DetailWorker(object):
    def __init__(self, thread_num, input_tube,
                 store_tube, process_id,
                 beanstalk_conf, log):
        """Initialise the queue connection, coroutine pool, record cache and crawler.

        :param thread_num: number of concurrent crawl coroutines
        :param input_tube: beanstalk tube to consume seed messages from
        :param store_tube: beanstalk tube the crawler writes results into
        :param process_id: identifier of this worker process (used in logs)
        :param beanstalk_conf: dict with beanstalkd 'host' and 'port'
        :param log: shared project logger
        """
        self.log = log
        self.input_tube = input_tube

        # number of coroutines to run
        self.thread_num = thread_num

        # number of crawl tasks currently queued or running in the pool
        self.current_queue_num = 0

        # back-pressure limit: stop reserving new jobs past this many pending tasks
        self.max_queue_num = self.thread_num + 5

        # worker process id
        self.process_id = process_id

        # coroutine pool
        self.pool = gevent.pool.Pool(self.thread_num)

        # beanstalk connection used to consume input messages
        self.beanstalk = PyBeanstalk(beanstalk_conf['host'], beanstalk_conf['port'])

        # round-robin pointer into PROVINCE_LIST; start at a random offset so
        # several worker processes spread their requests over different hosts
        self.host_point = random.randint(0, len(PROVINCE_LIST) - 1)

        # redis cache holding previous crawl records
        self.record_cache_redis = RecordCache(GS_RECORD_CACHE_CONF['host'],
                                              GS_RECORD_CACHE_CONF['port'],
                                              GS_RECORD_CACHE_CONF['password'],
                                              GS_RECORD_CACHE_CONF['db'],
                                              GS_RECORD_CACHE_CONF['max_connections'], log=log)

        # detail page crawler (source/target DB handles are initialised elsewhere)
        self.detail_crawler = DetailPageCrawler(parse_flag=CRAWL_TABLE_CONF['result_table']['parse_flag'],
                                                crawl_flag=CRAWL_TABLE_CONF['seed_table']['crawl_flag'],
                                                store_tube=store_tube,
                                                is_init_source_db=False,
                                                is_init_target_db=False, log=log)
        self.log.info("初始化详情页抓取worker完成: process_id = {}".format(process_id))

    # Block until the number of pending tasks drops below the back-pressure
    # limit.  NOTE: despite the "is_" prefix this waits instead of returning a
    # boolean; the name is kept so existing callers keep working.
    def is_queue_full(self):

        if self.current_queue_num < self.max_queue_num:
            return

        start_time = time.time()

        while self.current_queue_num >= self.max_queue_num:
            end_time = time.time()
            # emit a heartbeat log at most once every 10 seconds while waiting
            if end_time - start_time >= 10:
                self.log.info("当前worker正在等待协程调度: process_id = {}".format(self.process_id))
                start_time = end_time
            # gevent-patched sleep: yields so pool coroutines can run and finish
            time.sleep(0.5)

    # Pool callback: one task finished, release its queue slot.
    def result_callback(self, result):
        self.current_queue_num -= 1

    # Pick the next gsxt host, round-robin over PROVINCE_LIST.
    def get_crawl_host(self):

        # wrap the pointer once it runs past the end of the list
        if self.host_point >= len(PROVINCE_LIST):
            self.host_point = 0

        host = PROVINCE_HOST_DICT[PROVINCE_LIST[self.host_point]]
        self.host_point += 1
        return host

    # Load the crawl record for one company from the redis cache.
    def get_gs_crawl_record(self, province, company):
        return self.record_cache_redis.get_gs_crawl_record(province, company)

    # Run one crawl inside the pool.  Exceptions must not escape this wrapper:
    # if crawl_process raised out of the greenlet, gevent's apply_async would
    # never invoke result_callback, the queue slot would leak, and
    # is_queue_full() would eventually block forever.
    def _crawl_task(self, param_data, crawl_record_dict):
        try:
            return self.detail_crawler.crawl_process(self.process_id, param_data, crawl_record_dict)
        except Exception as e:
            self.log.error("详情页抓取协程异常: process_id = {}".format(self.process_id))
            self.log.exception(e)

    def start(self):
        """Main consume loop: reserve a seed from beanstalk, enrich it with a
        crawl host and the cached record, then hand it to the coroutine pool."""
        self.log.info("启动详情页抓取,等待消费数据: process_id = {}".format(self.process_id))

        count = 0
        while True:
            try:
                # apply back-pressure before reserving a new job
                self.is_queue_full()

                job = self.beanstalk.reserve(self.input_tube, 3)
                if job is None:
                    continue

                body = job.body
                # NOTE(review): the job is deleted before being processed, so
                # a crash past this point loses the seed — confirm intentional
                job.delete()
                count += 1

                param_data = util.json_loads(body)
                if param_data is None:
                    self.log.error('数据格式错误: msg = {msg}'.format(msg=body))
                    time.sleep(5)
                    continue

                # attach the round-robin crawl host
                host = self.get_crawl_host()
                param_data['host'] = host

                # province selects the page-store table; skip seeds without it
                province = param_data.get('province')
                if province is None:
                    continue

                # company name (the search_name field found on the list page)
                company = param_data.get('company')
                if company is None:
                    continue

                # load the previous crawl record from redis
                crawl_record_dict = self.get_gs_crawl_record(province, company)

                # dispatch to the pool; _crawl_task swallows task exceptions so
                # result_callback always runs and releases the queue slot
                self.pool.apply_async(self._crawl_task,
                                      args=(param_data, crawl_record_dict),
                                      callback=self.result_callback)

                # one more task pending
                self.current_queue_num += 1
                self.log.info('当前数据索引: process_id = {} index = {} task run = {}'.format(
                    self.process_id, count, self.current_queue_num))
            except SocketError as e:
                time.sleep(5)
                self.beanstalk.reconnect()
                self.log.warn("reconnect beanstalk...")
                self.log.exception(e)
            except Exception as e1:
                self.log.error("消费数据错误: ")
                self.log.exception(e1)
                time.sleep(1)
