import re
import time

import requests
from urllib.parse import urlsplit, parse_qsl
from scrapy.exceptions import IgnoreRequest
from scrapy.utils.request import request_fingerprint
from scrapy.utils.project import get_project_settings
from scrapy_redis.defaults import SCHEDULER_DUPEFILTER_KEY

from amac_spider.utils.handler import get_datetime
from amac_spider.utils.ua_pool import get_random_ua
from amac_spider.utils.mysql_connection_pool.server import DBUtilSyncMysqlConnectionPoolMaker, TwistedAsyncMysqlConnectionPoolMaker


class AddHeadersMiddleware(object):
    """Downloader middleware that rotates the User-Agent header.

    Each outgoing request gets a fresh UA drawn from the project's UA pool,
    which reduces the chance of the target site fingerprinting the crawler.
    """

    def process_request(self, request, spider):
        # Pick a new random UA for every single request.
        user_agent = get_random_ua()
        request.headers['User-Agent'] = user_agent


class GetProxyMiddleware(object):
    """Downloader middleware that attaches an HTTP proxy from a pool service.

    The proxy-pool HTTP API (settings key ``PROXY_POOL_INTERFACE``) is asked
    for a proxy for the request's URL; the returned proxy is written to
    ``request.meta['proxy']``.
    """

    # Seconds to wait on the proxy-pool API. Without a timeout a hung pool
    # service would stall the entire crawl indefinitely.
    PROXY_API_TIMEOUT = 10

    def __init__(self, settings):
        self.settings = settings

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings)

    def process_request(self, request, spider):
        proxy = self.get_proxy(request.url)
        # code == 1000 appears to mark success — on anything else, retry the
        # lookup with the first host label rewritten to "www"
        # (e.g. "gs.example.cn" -> "www.example.cn").
        if int(proxy.get('code', 0)) != 1000:
            netloc = urlsplit(request.url).netloc
            # count must be keyword: passing it positionally to re.sub is
            # deprecated since Python 3.13.
            netloc = re.sub(r'\w+', 'www', netloc, count=1)
            proxy = self.get_proxy(netloc)
        # NOTE(review): if the second lookup also fails there is no 'data'
        # key and this raises KeyError — presumably handled upstream; confirm.
        request.meta['proxy'] = 'http://' + proxy['data']

    def get_proxy(self, url):
        """Query the proxy-pool API for *url* and return the decoded JSON."""
        proxy_pool_api = self.settings.get('PROXY_POOL_INTERFACE')
        params = {"url": url}
        # Explicit timeout so a dead pool service cannot block the downloader.
        response = requests.get(url=proxy_pool_api, params=params,
                                timeout=self.PROXY_API_TIMEOUT)
        return response.json()


class MyRetryMiddleware(object):
    """Retry middleware with MySQL-backed failure compensation.

    Non-200 responses and download exceptions are retried up to RETRY_TIMES.
    404s and exhausted URLs are upserted into the ``amac_fail_url`` table so a
    separate compensation job can re-crawl them later.
    """

    def __init__(self, crawler, config):
        self.crawler = crawler
        # Maximum number of retries before a URL is abandoned.
        self.max_retry_times = get_project_settings().getint('RETRY_TIMES')
        # Synchronous MySQL connection pool used to persist failed URLs.
        self.handler = DBUtilSyncMysqlConnectionPoolMaker(**config)()

    @classmethod
    def from_crawler(cls, crawler):
        # Read the project settings once instead of once per key.
        settings = get_project_settings()
        config = {
            'host': settings['MYSQL_HOST'],
            # NOTE(review): 'MYSQL_POST' looks like a typo for 'MYSQL_PORT',
            # but the key must match the project's settings module — confirm
            # there before renaming either side.
            'port': settings['MYSQL_POST'],
            'user': settings['MYSQL_USER'],
            'password': settings['MYSQL_PASSWORD'],
            'db': settings['MYSQL_DB'],
            'charset': settings['MYSQL_CHARSET'],
        }
        return cls(crawler, config)

    def process_response(self, request, response, spider):
        """Drop 404s (recording them to MySQL); retry other non-200 responses."""
        logger = spider.my_logger
        if response.status == 404:
            logger.error('当前URL：{}响应状态码为404，丢弃该URL'.format(response.url))
            self._save_mysql(request=request, url_type=self._get_url_type(request),
                             name=spider.name, logger=logger, code=404)
            raise IgnoreRequest()
        if response.status != 200:
            reason = '当前URL响应状态码不正确，URL：{}，响应状态码：{}，开始'.format(response.url, response.status)
            return self._retry(request, reason, spider)
        return response

    def process_exception(self, request, exception, spider):
        """Retry any request whose download raised an exception."""
        # Bug fix: the original also passed `spider` to .format() although the
        # template has only two placeholders; str.format silently ignored it.
        reason = '当前URL请求发生异常，URL：{}，异常信息为：{}，开始'.format(request.url, exception)
        return self._retry(request, reason, spider)

    def _retry(self, request, reason, spider):
        """Return a retried copy of *request*, or record the failure and raise
        IgnoreRequest once the retry budget is exhausted."""
        logger = spider.my_logger
        retries = request.meta.get('retry_times', 0) + 1

        if retries <= self.max_retry_times:
            logger.info(reason + '第{}次重试'.format(retries))
            retry_request = request.copy()
            retry_request.meta['retry_times'] = retries
            # Bypass the dupefilter so the retried copy is not discarded.
            retry_request.dont_filter = True
            return retry_request

        logger.error('当前URL重试次数已达到最大，放弃该URL，URL：{}'.format(request.url))
        # The Redis fingerprint is deliberately NOT removed here: the
        # compensation job owns the fingerprint — on a successful (or even
        # failed) compensation pass it writes/keeps the fingerprint itself.
        # _delete_fingerprint exists for the opt-in removal if that policy
        # changes.
        self._save_mysql(request=request, url_type=self._get_url_type(request),
                         logger=logger, name=spider.name)
        raise IgnoreRequest()

    def _get_url_type(self, request):
        """Classify the URL by its ``type`` query parameter ('detail' when absent/empty)."""
        return dict(parse_qsl(urlsplit(request.url).query)).get('type', None) or 'detail'

    def _save_mysql(self, request, url_type, name, logger, code=None):
        """Upsert the failed URL into the ``amac_fail_url`` compensation table.

        ``url_type`` and ``logger`` are currently unused in the body but kept
        for interface compatibility with existing call sites — TODO: either
        persist ``url_type`` in the row or drop the parameters.
        """
        table_name = 'amac_fail_url'
        status_code = code or ''  # empty string when no HTTP status is known
        fingerprint = request_fingerprint(request)
        items = {
            'url': request.url,
            'fingerprint': fingerprint,
            'spider': name,
            'code': status_code,
            'flag': 0,  # presumably 0 == "not yet compensated" — confirm schema
            'time': get_datetime(time.time()),
        }
        self.handler.upsert(item=items, table_name=table_name, create=True)

    def _delete_fingerprint(self, request, spider):
        """Remove this request's fingerprint from the scrapy-redis dupefilter set."""
        key = SCHEDULER_DUPEFILTER_KEY % {'spider': spider.name}
        fingerprint = request_fingerprint(request)
        redis_server = spider.server
        redis_server.srem(key, fingerprint)
