# -*- coding: utf-8 -*-

__author__ = 'bitfeng'
# Define your spider middleware here
# Keep the code generic and clean

import logging
import datetime
from scrapy import signals
from scrapy.http import Request
from scrapy.exceptions import IgnoreRequest
from db_connect import MySQLConnect, PostgreSQLConnect


class UpdateProcessUrlsStatus(object):
    """Spider middleware that tracks request/response status in a DB table.

    A row is inserted when a Request is generated (add_time, url, spidername,
    referer) and updated when its Response arrives (update_time, rank, status,
    and optionally the html body).
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the middleware and wire spider signals."""
        midd = cls(crawler.settings)
        crawler.signals.connect(midd.close_spider, signals.spider_closed)
        crawler.signals.connect(midd.open_spider, signals.spider_opened)
        return midd

    def __init__(self, settings):
        # Pick the backend configured for the spidermiddleware; anything
        # other than POSTGRESQL falls back to MySQL.
        db = settings.get('DB_USE')['SPIDERMIDDLEWARE']
        if db == 'POSTGRESQL':
            self.conn = PostgreSQLConnect.createConn(settings.get('POSTGRESQL_URI'))
        else:
            self.conn = MySQLConnect.createConn(settings.get('MYSQL_URI'))
        # Whether response bodies are persisted (spider attribute may override).
        self.html_storage = settings.get('HTML_STORAGE_ENABLED')

    def open_spider(self, spider):
        # Spiders may override the tracking table name; default 'process_urls'.
        self.process_urls_tablename = getattr(spider, 'process_urls_tablename', 'process_urls')

    def close_spider(self, spider):
        self.conn.close()

    def process_start_requests(self, start_requests, spider):
        """Yield start requests.

        When the spider defines no start_urls, load them from the configured
        table; otherwise record each start request in the tracking table.
        """
        if not getattr(spider, 'start_urls', ''):
            start_urls_tablename = getattr(spider, 'start_urls_from', 'start_urls')
            cursor = self.conn.cursor()
            # NOTE(review): pg_catalog is PostgreSQL-only; this branch will
            # fail on a MySQL connection -- confirm intended backends.
            cursor.execute("SELECT tablename FROM pg_catalog.pg_tables")
            if (start_urls_tablename,) not in cursor.fetchall():
                # BUG FIX: logging.INFO is an int level constant, not a
                # callable -- the original raised TypeError here.
                logging.error('[ERROR] Table %s does not Exist', start_urls_tablename)
                exit(0)  # TODO(review): raising CloseSpider would be cleaner
            # The table name is an identifier and cannot be a bound
            # parameter; the value is passed parameterized to avoid injection.
            cursor.execute(
                """select urls from "%s" where spidername=%%s and rank='start' and status=200"""
                % start_urls_tablename,
                (spider.name,)
            )
            urls_set = cursor.fetchall()
            if not urls_set:
                logging.error('[ERROR] There is no start_urls in table %s', start_urls_tablename)
                exit(0)
            # BUG FIX: the original iterated a second fetchall(), which is
            # always empty after the first call consumed the result set.
            return (Request(row[0]) for row in urls_set)
        else:
            def _record_refer(r):
                # Insert one tracking row per start Request; non-Request
                # items pass through unrecorded.
                cursor = self.conn.cursor()
                if isinstance(r, Request):
                    cursor.execute(
                        """insert into "%s"(add_time, url, spidername, refer) values(%%s,%%s,%%s,%%s)"""
                        % self.process_urls_tablename,
                        # Start requests have no referer; store the literal 'None'.
                        (datetime.datetime.now(), r.url, spider.name, 'None')
                    )
                self.conn.commit()
                return r
            return (_record_refer(r) for r in start_requests or ())

    def process_spider_output(self, response, result, spider):
        """Record every outgoing Request (add_time, url, post_raw, spidername,
        referer) and pass all result items through unchanged."""
        def _record_refer(r):
            cursor = self.conn.cursor()
            if isinstance(r, Request):
                # BUG FIX: the column list was missing post_raw, so the
                # insert had five values for four columns and always failed.
                cursor.execute(
                    """insert into "%s"(add_time, url, post_raw, spidername, refer) values(%%s,%%s,%%s,%%s,%%s)"""
                    % self.process_urls_tablename,
                    (datetime.datetime.now(), r.url, str(r.body), spider.name, response.url)
                )
            self.conn.commit()
            return r

        return (_record_refer(r) for r in result or ())

    def process_spider_input(self, response, spider):
        """Update the tracked row for this response: update_time, rank, status
        and -- when html storage is enabled -- the response body."""
        html_storage = getattr(spider, 'html_storage', self.html_storage)
        cursor = self.conn.cursor()
        cursor.execute(
            """update "%s" set update_time=%%s, rank=%%s, status=%%s, html=%%s where url=%%s"""
            % self.process_urls_tablename,
            (datetime.datetime.now(),
             # NOTE(review): writes the literal string 'rank' -- looks like a
             # placeholder left in; confirm the intended rank value.
             'rank',
             response.status,
             # html storage is disabled by default; store empty string then.
             response.body if html_storage else '',
             response.url)
        )
        self.conn.commit()
        return


class HttpError(IgnoreRequest):
    """Raised to filter out a response with a non-allowed HTTP status.

    Keeps a reference to the offending response so exception handlers can
    inspect it.
    """

    def __init__(self, response, *args, **kwargs):
        super(HttpError, self).__init__(*args, **kwargs)
        self.response = response


class HttpErrorMiddleware(object):
    """Filter out responses whose HTTP status is not allowed.

    2xx responses and start URLs (meta rank '0-0') always pass; any other
    status passes only when allowed via request meta, the
    HTTPERROR_ALLOW_ALL / HTTPERROR_ALLOWED_CODES settings, or the spider's
    handle_httpstatus_list attribute. Disallowed responses raise HttpError.
    """

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings)

    def __init__(self, settings):
        # HTTPERROR_ALLOW_ALL lets every status through; HTTPERROR_ALLOWED_CODES
        # whitelists specific non-2xx codes globally.
        self.handle_httpstatus_all = settings.getbool('HTTPERROR_ALLOW_ALL')
        self.handle_httpstatus_list = settings.getlist('HTTPERROR_ALLOWED_CODES')

    def process_spider_input(self, response, spider):
        """Return None to let the response through; raise HttpError otherwise."""
        # Start URLs (rank '0-0') are always recorded, whatever their status.
        # BUG FIX: response.meta is a dict, so the original
        # getattr(response.meta, 'rank', '') could never find the key and the
        # start-URL bypass was dead code; use dict.get() instead.
        if response.meta.get('rank', '') == '0-0' or 200 <= response.status < 300:
            return
        meta = response.meta
        if 'handle_httpstatus_all' in meta:
            return
        if 'handle_httpstatus_list' in meta:
            allowed_statuses = meta['handle_httpstatus_list']
        elif self.handle_httpstatus_all:
            return
        else:
            allowed_statuses = getattr(spider, 'handle_httpstatus_list', self.handle_httpstatus_list)
        if response.status in allowed_statuses:
            return
        raise HttpError(response, 'Ignoring non-200 response')

    def process_spider_exception(self, response, exception, spider):
        """Swallow HttpError (log and drop the response); let others propagate."""
        if isinstance(exception, HttpError):
            # Logged at INFO with lazy %-args: records every filtered URL.
            logging.log(logging.INFO,
                "Ignoring response %(response)r: HTTP status code is not handled or not allowed",
                {'response': response}, extra={'spider': spider},
            )
            return []