# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from networkx.drawing import draw_random
from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
import random
import time
from selenium import webdriver
from scrapy.http import HtmlResponse
from selenium.webdriver.chrome.service import Service
from mySpider.settings import USER_AGENT_LIST,count,ipPool
import requests



class SeleniumMiddleware(object):
    """Scrapy downloader middleware that rotates the User-Agent header,
    can rotate proxy IPs, and renders JavaScript-heavy pages with a real
    Chrome browser via Selenium."""

    def process_request(self, request, spider):
        """Attach a random User-Agent and a Cookie header to every request.

        For URLs containing 'query' (the dynamically rendered pages), load
        the page in Chrome, wait for the JavaScript to render, and return
        the rendered HTML as an HtmlResponse — short-circuiting Scrapy's
        default downloader. Returns None for all other URLs so the normal
        download path is used.
        """
        ua = random.choice(USER_AGENT_LIST)
        request.headers['User-Agent'] = ua
        temp = 'http://ecs.hailiangip.com:8422/api/getIpEncrypt?dataType=1&encryptParam=SlDyzgfgDW12vuaMHmQkMyBYXf0wSdR0KIhVhoMMPHvy912xFHA3Hogn7b2rQpv2SCJwzD37L%2FsUW4QY%2FPsl%2FhsUg816Y2ixIm%2FOIPVCxT%2FLy8dKIrhXae%2BjMxq4rD3xXb9dVNIYXivmMFVWOxlWq4%2Be0gBhzT6MPkN515dUA6BXwMMY2Pp7wRNtgRIJmPbHKHmQBjYm32MK13MpScW7XF7%2FeDXlL0x6IKTgy4kKtwBCImxL425Pnd7uIXyLhWax'
        # NOTE(review): `temp` looks like a proxy-API URL, not a cookie
        # string; this parse yields a single odd key/value pair — confirm
        # the intended cookie source with the author.
        cookies = {data.split('=')[0]: data.split('=')[-1] for data in temp.split(';')}
        # Random IP proxy (disabled)
        # self.RandomIp(request=request)
        # BUG FIX: header values must be strings, not dicts — serialize
        # the cookie mapping into standard "k=v; k=v" header form.
        request.headers['Cookie'] = '; '.join(
            '{}={}'.format(k, v) for k, v in cookies.items()
        )
        url = request.url
        # Intercept requests coming from the scheduler and filter out the
        # pages that need dynamic (JS) rendering.
        if 'query' in url:
            driver = self.startBrowser()
            try:
                driver.get(url)
                # Fixed wait for the JS to finish rendering.
                # TODO: replace with an explicit WebDriverWait condition.
                time.sleep(5)
                # Grab the post-render page source.
                data = driver.page_source
            finally:
                # quit() (not close()) terminates the whole browser and the
                # chromedriver process, so nothing leaks when get() raises.
                driver.quit()
            # Build the response object and return it early so Scrapy skips
            # its own downloader for this request.
            return HtmlResponse(url, body=data, encoding='utf-8', request=request)

    def RandomIp(self, request):
        """Assign a random proxy from ipPool to the request's meta.

        After more than 20 uses the pool is cleared and refreshed with 20
        fresh proxies fetched from the Kuaidaili API.
        """
        # Pick one proxy at random.
        ip = random.choice(ipPool)
        print('当前ip', ip, '-----', count['count'])
        # Route this request through the chosen proxy.
        request.meta['proxy'] = ip
        # Once the counter exceeds 20, flush the pool and refill it.
        if count['count'] > 20:
            print('-------------切换ip------------------')
            count['count'] = 0
            ipPool.clear()
            # Timeout added so a slow/unreachable API cannot hang the crawl.
            ips = requests.get(
                'https://dps.kdlapi.com/api/getdps/?secret_id=ooao4hee8xdac0l7tfum&signature=a6rnrbaql2yzlbanz5zsfn8lb28p5nuk&num=20&pt=1&format=text&sep=1&dedup=1',
                timeout=10,
            )
            for ip in ips.text.split('\r\n'):
                # Skip blank lines so 'http://' alone never enters the pool.
                if ip:
                    ipPool.append('http://' + ip)
        # Count this use of the pool.
        count['count'] += 1

    def startBrowser(self):
        """Launch and return a Chrome WebDriver with the automation banner
        suppressed. Caller is responsible for quitting the browser."""
        service = Service('./mySpider/chromedriver.exe')
        options = webdriver.ChromeOptions()
        # Browser reuse (attach to an already-running Chrome instead of
        # launching a new one) — effective against the Boss site's
        # anti-scraping checks; left disabled.
        # options.add_experimental_option('debuggerAddress', 'localhost:9222')
        # Hide the "controlled by automated software" switch so sites cannot
        # trivially detect the automation.
        options.add_experimental_option('excludeSwitches', ['enable-automation'])
        browser = webdriver.Chrome(service=service, options=options)
        return browser
