#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/5/16 15:08
# @Author  : Leixu
# @Site    : 
# @File    : TornadoBaseSpider.py
# @Software: PyCharm Community Edition
from abc import ABCMeta

from tornado.httpclient import HTTPResponse

from luobo.component.params_deliver import ParamsDeliver
from luobo.component.params_deliver.redis_deliver import RedisDeliver
from luobo.spider.base_spider import BaseSpider, Launcher, Setting, BrowserSpider, WorkLauncher, ScriptRunMode
from luobo.spider.utils.feeder import UrlFeeder, CrawlerFeeder
from luobo.utils.random_string import random_string
import asyncio


class TornadoBaseSpider(BaseSpider):
    """Async HTTP spider base built on tornado + asyncio.

    Subclasses override :meth:`start_url` to feed URLs into the launcher
    and :meth:`process_data` to consume fetched responses, then call
    :meth:`start_crawl`.
    """

    # Class-level counter shared by all instances.
    counter = 0

    def __init__(self):
        super().__init__()
        self.setting = Setting()

        # Number of URLs launched per batch.
        self.setting.URL_NUMBERS_ONE_TIME = 2
        self.setting.ENABLE_CHECK_URL_REPEAT = False

        # Number of concurrent worker coroutines.
        self.setting.PARALLEL_WORKER_NUMBERS = 100

        self.crawler_setting = {
            "random_ua": True,
            "delay_enable": True,
            "delay_random_enable": True,
            "delay_random_range": "1~9",
            "delay": 1
        }
        self.request_setting = {
            "method": "GET",
            "headers": {},
            "body": None,
            "connect_timeout": 1 * 60,
            "request_timeout": 1 * 60,
            "validate_cert": False
        }
        self.launcher = Launcher(self.setting, self)
        # URL feeder: loads URLs into the launcher.
        self.url_feeder = UrlFeeder(self.launcher)

    def start_crawl(self):
        """Load the start URLs, then run the crawl to completion."""
        self.start_url()
        self.start()

    def check_setting(self):
        """Validate that both setting attributes are dicts.

        Raises:
            TypeError: if ``request_setting`` or ``crawler_setting`` is
                not a dict.

        BUGFIX: the original checks were inverted — they raised when the
        settings WERE dicts and accepted everything else. The error
        message typo ("requets_setting") is also fixed.
        """
        if not isinstance(self.request_setting, dict):
            raise TypeError("request_setting must be a dict")
        if not isinstance(self.crawler_setting, dict):
            raise TypeError("crawler_setting must be a dict")

    def start(self, response: HTTPResponse = None):
        """Spawn PARALLEL_WORKER_NUMBERS launcher coroutines and block
        until they all finish."""
        self.crawler_setting["request_config"] = self.request_setting
        futures = []
        for _ in range(self.setting.PARALLEL_WORKER_NUMBERS):
            # NOTE(review): launcher.begin receives the launcher as its
            # explicit first argument — presumably an unbound coroutine
            # function; confirm against Launcher's definition.
            futures.append(asyncio.ensure_future(
                self.launcher.begin(self.launcher, self.crawler_setting)))
        asyncio.get_event_loop().run_until_complete(asyncio.gather(*futures))

    def start_url(self):
        """Hook: subclasses load their start URLs here."""
        pass

    def process_data(self, response):
        """Hook: subclasses process a fetched response here."""
        pass


class SeleniumBaseSpider(BrowserSpider):
    """Browser-driven spider base that dispatches work through a WorkLauncher.

    Subclasses supply parameters via :meth:`add_params` and work via
    :meth:`start_crawler`, then call :meth:`start_crawl`.
    """

    def __init__(self):
        super().__init__()
        self.setting = Setting()
        # Work launcher: responsible for dispatching execution tasks.
        self.launcher: WorkLauncher = WorkLauncher(self.setting, self)
        # How many tasks run concurrently.
        self.setting.PARALLEL_WORKER_NUMBERS = 10
        self.params_deliver: ParamsDeliver = None
        # Crawler feeder, wired to the launcher at construction time.
        self.crawler_feeder = CrawlerFeeder(self.launcher)

    def start_crawler(self):
        """Hook: subclasses prepare the crawler here."""
        pass

    def param_input(self, s_type="redis", key=None):
        """Attach a parameter deliverer; a random key is generated when
        none is given. Only the "redis" backend is recognized."""
        key = key or random_string()
        if s_type == "redis":
            self.params_deliver = RedisDeliver(key)

    def add_params(self):
        """Hook: subclasses push crawl parameters here."""
        pass

    def error_handler(self):
        """Hook: subclasses handle task errors here."""
        pass

    def response_output(self):
        """Hook: subclasses emit task results here."""
        pass

    def start_crawl(self):
        """Wire up parameter delivery, schedule parameter feeding on the
        event loop, then run the workers."""
        self.param_input()

        async def _feed():
            self.add_params()

        asyncio.ensure_future(_feed())
        self.start_crawler()
        self.start()

    def start(self):
        """Run PARALLEL_WORKER_NUMBERS worker coroutines to completion,
        choosing the local or remote entry point per RUN_MODE."""
        tasks = []
        for _ in range(self.setting.PARALLEL_WORKER_NUMBERS):
            if self.RUN_MODE == ScriptRunMode.LOCAL:
                coro = self.launcher.begin_local(self.launcher, self)
            else:
                coro = self.launcher.begin(self.launcher, self)
            tasks.append(asyncio.ensure_future(coro))
        asyncio.get_event_loop().run_until_complete(asyncio.gather(*tasks))
        print("all done")


class CrawlerRuler(metaclass=ABCMeta):
    """Container for one crawler rule: a script (path + text), a config
    dict, and the rule's index within its rule set."""

    def __init__(self):
        self._rule_index = None
        self._script = None
        self._script_path = None
        self._config = {}

    def load_script(self, _script=None):
        """Read the script file at *_script* (utf-8) and cache both the
        path and the text."""
        self._script_path = _script
        handle = open(_script, "r", encoding="utf8")
        try:
            self._script = handle.read()
        finally:
            handle.close()

    def get_script_context(self):
        """Return the cached script text (None until load_script runs)."""
        return self._script

    def get_script_path(self):
        """Return the path the script was loaded from, if any."""
        return self._script_path

    def get_config(self):
        """Return the (mutable) rule configuration dict."""
        return self._config

    @property
    def rule_index(self):
        """Position of this rule within its rule set."""
        return self._rule_index

    @rule_index.setter
    def rule_index(self, value):
        self._rule_index = value


class CustomCrawlerRuler(CrawlerRuler):
    """CrawlerRuler whose config is tagged with type "custom"."""

    def __init__(self):
        super().__init__()
        self._config["type"] = "custom"


class AsyncCrawlerRuler(CrawlerRuler):
    """CrawlerRuler whose config is tagged with type "async"."""

    def __init__(self):
        super().__init__()
        self._config["type"] = "async"
