# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
import huayiQDB.controller.log as log
from queue import Queue
from scrapy.http.response import Response
from scrapy.exceptions import IgnoreRequest
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
import huayiQDB.controller.dbUtils as db

class HuayiqdbSpiderMiddleware:
    """Transparent spider middleware: forwards everything unchanged.

    Scrapy treats undefined hooks as no-ops; the hooks below are explicit
    no-op implementations of the same contract, kept so they can be
    fleshed out later.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware and subscribe to the spider_opened signal."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Run before a response reaches the spider.

        Must return None (continue) or raise an exception.
        """
        return None

    def process_spider_output(self, response, result, spider):
        """Yield every Request/item produced by the spider, untouched."""
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """No handling here: returning None lets other middleware decide.

        May return None or an iterable of Requests/items.
        """
        return None

    def process_start_requests(self, start_requests, spider):
        """Pass the spider's start requests through unchanged.

        Must yield only Requests (never items).
        """
        yield from start_requests

    def spider_opened(self, spider):
        """Signal handler: log when the spider starts."""
        spider.logger.info('Spider opened: %s' % spider.name)


class HuayiqdbDownloaderMiddleware:
    """Transparent downloader middleware: a pure pass-through.

    Hooks Scrapy does not find are skipped; these explicit no-ops document
    the contract of each hook without modifying any traffic.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Instantiate and register the spider_opened signal handler."""
        instance = cls()
        crawler.signals.connect(instance.spider_opened, signal=signals.spider_opened)
        return instance

    def process_request(self, request, spider):
        """Run for each request on its way to the downloader.

        Allowed results: None (continue down the chain to the downloader),
        a Response (short-circuit straight back to the engine/spider),
        a Request (hand it back to the scheduler for re-scheduling), or
        raising IgnoreRequest (triggers process_exception() of installed
        downloader middlewares).  This implementation always continues.
        """
        return None

    def process_response(self, request, response, spider):
        """Return the downloaded response unchanged.

        Must return a Response, a Request, or raise IgnoreRequest.
        """
        return response

    def process_exception(self, request, exception, spider):
        """Leave download errors to other middleware (return None).

        May return None, a Response (stops the exception chain), or a
        Request (also stops the chain and re-schedules).
        """
        return None

    def spider_opened(self, spider):
        """Signal handler: log when the spider starts."""
        spider.logger.info('Spider opened: %s' % spider.name)


class Huayiqdb_Dispatch_DownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.
    #
    # Dispatch scheduler: throttles visits to individual study-video links.
    # How it works: video-related requests are intercepted (IgnoreRequest)
    # and parked in per-kind queues; only after one "study" collection
    # completes (signalled by meta['getRequest']) is the next parked request
    # released, so links are visited strictly one at a time, in order:
    # pending studies first, then courses, then categories ("items").
    #
    # NOTE(review): all state below lives on the CLASS, so every instance
    # of this middleware shares the same queues/counters — presumably only
    # one crawler runs per process; verify before running several.
    logger = log.logger
    parse_study_queue = Queue(maxsize=0)  # parked per-video "study" requests
    course_queue = Queue(maxsize=0)  # parked course requests
    item_queue = Queue(maxsize=0)  # parked category ("item") requests
    course_num = 0 # course counter (1 in-flight + queued courses)
    item_num = 0 # category counter (1 in-flight + queued items)
    num = 0  # study counter (1 in-flight + queued studies)
    cookie = ''  # cookie captured from the most recently intercepted study request
    def __init__(self):
        pass

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        """Intercept video/course/category requests and release them one at a time.

        Scrapy contract — must either:
        - return None: continue processing this request (subsequent
          middlewares' process_request() run until the downloader gets it);
        - return a Response: later middlewares and the downloader are
          skipped and the Response goes straight back to the engine, which
          hands it to the Spider for parsing;
        - return a Request: rarely done — the request is handed back to the
          scheduler, which re-schedules it;
        - raise IgnoreRequest: process_exception() methods of installed
          downloader middlewares will be called.

        Flow here: the first request of each kind passes through; every
        later one is parked in its queue and IgnoreRequest is raised.
        Completion requests (meta['getRequest']) persist results through
        spider.db_sys, then pop and return the next parked request.
        """
        # Collect requests into queues and re-issue them in order.
        db_sys = spider.db_sys  # project DB helper attached to the spider

        # --- category ("item") requests ---------------------------------
        if request.meta.get("url_type") == 'item':
            # Category already recorded in the DB: drop the request.
            if db_sys.isInItem(request.meta.get("NS_Info")['Item_Value']):
                raise IgnoreRequest('')
            # First category goes straight through; only the counter moves.
            if self.item_num == 0:
                self.item_num = self.item_num + 1
                return None
            request.meta['url_type'] = ''  # clear the flag used for interception
            self.item_num = self.item_num + 1
            self.item_queue.put(request)
            self.logger.info("入___item_queue队列长度:" + str(self.item_queue.qsize()))
            raise IgnoreRequest('')

        # --- course requests ---------------------------------------------
        if request.meta.get("R_type") == 'course':
            # Course already recorded in the DB: drop the request.
            if db_sys.isInCourse(request.meta.get("Course_Info")['Course_Value']):
                raise IgnoreRequest('')
            # First course goes straight through.
            if self.course_num == 0:
                self.course_num = self.course_num + 1
                return None
            request.meta['R_type'] = ''  # clear the flag used for interception
            self.course_num = self.course_num + 1
            self.course_queue.put(request)
            self.logger.info("入___course_queue队列长度:" + str(self.course_queue.qsize()))
            raise IgnoreRequest('')

        # --- per-video "study" requests ----------------------------------
        if request.meta.get("parse_course") == 'parse_course':
            # Study already recorded in the DB: drop the request.
            if db_sys.isInStudy(request.meta.get("Study_Info")['Study_Value']):
                raise IgnoreRequest('')
            # First study goes straight through.
            if self.num ==0:
                self.num = self.num + 1
                return None
            request.meta['parse_course'] = '' # clear the flag used for interception
            # Remember the cookie so queued category requests can reuse it.
            self.cookie = request.meta.get('cookie')
            self.num = self.num + 1
            self.parse_study_queue.put(request)
            self.logger.info("入___parse_study_queue队列长度:" + str(self.parse_study_queue.qsize()))
            raise IgnoreRequest('')

        # --- completion signal: release the next parked study ------------
        if request.meta.get('getRequest') and self.parse_study_queue.qsize() !=  0:
            self.logger.info("----study------------单个学习开始------------------------------------------:")
            self.logger.info("出___parse_study_queue队列长度:" + str(self.parse_study_queue.qsize()))
            self.logger.info("出___course_queue队列长度:" + str(self.course_queue.qsize()))
            self.logger.info("出___item_queue队列长度:" + str(self.item_queue.qsize()))
            # Persist the study that just finished.
            study_value = request.meta.get('study_value')
            db_sys.save_study(study_value)
            self.num = self.num -1
            # NOTE(review): collapsing 1 -> 0 appears to re-arm the
            # "first request passes straight through" branch above —
            # confirm this is the intended counter protocol.
            if self.num ==1:
                self.num = 0
            self.logger.info("最终num:" + str(self.num))
            return self.parse_study_queue.get()

        # --- no studies left: release the next parked course -------------
        if request.meta.get("getRequest") and self.parse_study_queue.qsize() == 0 and self.course_queue.qsize() !=0 :  # has one course been fully studied?
            self.logger.info("-----course----新课程开始------------------------------")
            self.logger.info("出___parse_study_queue队列长度:" + str(self.parse_study_queue.qsize()))
            self.logger.info("出___course_queue队列长度:" + str(self.course_queue.qsize()))
            self.logger.info("出___item_queue队列长度:" + str(self.item_queue.qsize()))
            print('id:'+str(request.meta.get('id')))
            # The completion came from a study: persist both the study and
            # its (now finished) course.
            if request.meta.get('id') == 'study':
                study_value = request.meta.get('study_value')
                db_sys.save_study(study_value)
                Course_Info = request.meta.get('Course_Info')
                db_sys.save_course(Course_Info['Course_Value'])

            self.course_num = self.course_num - 1
            # NOTE(review): same 1 -> 0 collapse as for `num` — confirm.
            if self.course_num == 1:
                self.course_num = 0
            self.logger.info("最终course_num:" + str(self.course_num))
            return self.course_queue.get()

        # --- no studies or courses left: release the next category -------
        if request.meta.get('getRequest') and self.parse_study_queue.qsize() == 0 and self.course_queue.qsize() ==0 and self.item_queue.qsize() != 0 :
            self.logger.info("-----item------新分类开始---------------------------:")
            self.logger.info("出___parse_study_queue队列长度:" + str(self.parse_study_queue.qsize()))
            self.logger.info("出___course_queue队列长度:" + str(self.course_queue.qsize()))
            self.logger.info("出___item_queue队列长度:" + str(self.item_queue.qsize()))
            print('id:'+str(request.meta.get('id')))
            # Persist the finished study plus its category and course.
            if request.meta.get('id') == 'study':
                study_value = request.meta.get('study_value')
                db_sys.save_study(study_value)
                NS_Info = request.meta.get('NS_Info')
                db_sys.save_item(NS_Info['Item_Value'])
                Course_Info = request.meta.get('Course_Info')
                db_sys.save_course(Course_Info['Course_Value'])
            self.item_num = self.item_num -1
            # NOTE(review): same 1 -> 0 collapse as for `num` — confirm.
            if self.item_num == 1:
                self.item_num = 0
            self.logger.info("最终item_num:" + str(self.item_num))
            request = self.item_queue.get()
            # Re-attach the last captured cookie before re-issuing.
            request.meta['cookie'] = self.cookie
            return request
        # Any other request: let it through untouched.
        return None



    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        #
        # Must either:
        # - return a Response object: later middlewares' process_response()
        #   continue to run and the response finally reaches the engine;
        # - return a Request object: stops the middleware chain and
        #   re-schedules the request;
        # - or raise IgnoreRequest.
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        # Signal handler: log when the spider starts.
        spider.logger.info('Spider opened: %s' % spider.name)

class Huayiqdb_Item_DownloaderMiddleware:
    """Pass-through downloader middleware reserved for item handling.

    Every hook forwards its input untouched; the class keeps the shared
    project logger at hand for future use.
    """

    logger = log.logger  # shared project logger

    def __init__(self):
        pass

    @classmethod
    def from_crawler(cls, crawler):
        """Instantiate and subscribe to the spider_opened signal."""
        instance = cls()
        crawler.signals.connect(instance.spider_opened, signal=signals.spider_opened)
        return instance

    def process_request(self, request, spider):
        """Let every request continue down the chain (always None).

        Contract: None continues toward the downloader; a Response
        short-circuits back to the engine; a Request is re-scheduled;
        raising IgnoreRequest invokes other middlewares' process_exception().
        """
        return None

    def process_response(self, request, response, spider):
        """Return the response exactly as received from the downloader."""
        return response

    def process_exception(self, request, exception, spider):
        """No special handling: defer to other middlewares (return None)."""
        return None

    def spider_opened(self, spider):
        """Signal handler: log when the spider starts."""
        spider.logger.info('Spider opened: %s' % spider.name)