#####
#####   此处利用selenium将我们的数据
#####   spider实例对象是可以访问到我们配置文件的
#####   中间件配置：用来对我们的请求和响应做出一些控制
#####   介于调度器的请求对象队列和下载器之间
#####   中间件一般情况下可不写
#####   下载中间件介于引擎engine、和下载器downloader之间
#####   下载中间件分为请求下载中间件和响应下载中间件，对应要实现的方法：process_request,process_response
#####   下载中间件通常作用于防爬，使用场景有添加用户代理，添加ip端口代理
#####   说明：此处自定义process_response请求中间件中将响应数据临时存储在项目根目录下tem.html下
#####




from scrapy import signals
from itemadapter import is_item, ItemAdapter
import random
from selenium import webdriver
from  scrapy.http import HtmlResponse
from selenium.common.exceptions import TimeoutException
import time

class RandomUserAgentmiddleware:
    """Placeholder downloader middleware intended for user-agent rotation.

    Currently a no-op: returning ``None`` tells Scrapy to keep processing
    the request through the remaining middlewares and the downloader.
    """

    def process_request(self, request, spider):
        # No-op for now; fall through to the next middleware.
        return None

    


class JdongMiddleware:
    """Downloader middleware that renders selected spiders' pages via Selenium.

    For spiders listed in ``SELENIUM_SPIDERS`` the request URL is loaded in
    the spider's Selenium browser (``spider.browser`` is assumed to be a
    webdriver instance attached by the spider) and the rendered page source
    is returned as an ``HtmlResponse``, short-circuiting Scrapy's downloader.
    ``process_response`` additionally dumps every response body to
    ``tem.html`` in the working directory for debugging.
    """

    # Names of spiders whose requests must be fetched through Selenium.
    SELENIUM_SPIDERS = {'yq', 'jobs', 'jd', 'tbd'}

    def process_request(self, request, spider):
        """Fetch ``request.url`` with Selenium for the configured spiders.

        Returns:
            HtmlResponse built from the rendered page (bypasses the
            downloader), or None so other spiders are handled normally.
        """
        if spider.name not in self.SELENIUM_SPIDERS:
            return None
        try:
            spider.browser.get(request.url)
            # Brief pause to let dynamic content begin loading.
            time.sleep(0.2)
        except TimeoutException as e:
            print('超时', e)
            # Stop the page load so the browser does not hang on timeout.
            spider.browser.execute_script('window.stop()')
        # Extra settle time before grabbing the rendered page source.
        time.sleep(0.9)
        return HtmlResponse(url=spider.browser.current_url,
                            body=spider.browser.page_source,
                            encoding="utf-8", request=request)

    def process_response(self, request, response, spider):
        """Dump the response body to ``tem.html`` (skipped for spider 'info').

        Always returns the response unchanged.
        """
        if spider.name != 'info':
            with open("tem.html", "w", encoding="utf-8") as f:
                f.write(response.text)
        return response

class AmaSpiderMiddleware:
    """Default Scrapy spider middleware (template boilerplate).

    Every hook is a pass-through; Scrapy treats a ``None`` return from
    ``process_spider_input`` / ``process_spider_exception`` as "continue
    with default handling".
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Create the middleware and register the spider_opened handler."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Accept every response going into the spider."""
        return None

    def process_spider_output(self, response, result, spider):
        """Forward every item/request produced by the spider unchanged."""
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """Defer to Scrapy's default exception handling."""
        return None

    def process_start_requests(self, start_requests, spider):
        """Forward the spider's start requests unchanged."""
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

class AmaDownloaderMiddleware:
    """Default Scrapy downloader middleware (template boilerplate).

    All hooks are pass-throughs: requests continue to the downloader,
    responses are returned untouched, exceptions fall back to Scrapy's
    default handling.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Create the middleware and register the spider_opened handler."""
        instance = cls()
        crawler.signals.connect(instance.spider_opened,
                                signal=signals.spider_opened)
        return instance

    def process_request(self, request, spider):
        """Continue down the middleware chain toward the downloader."""
        return None

    def process_response(self, request, response, spider):
        """Return the downloaded response unchanged."""
        return response

    def process_exception(self, request, exception, spider):
        """Let other middlewares / Scrapy handle the exception."""
        return None

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
