# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import random
import time
import urllib
import urllib.parse  # explicit: `import urllib` alone does not bind the .parse submodule

from scrapy import signals
from scrapy.http import HtmlResponse
from selenium.common import TimeoutException
from selenium.webdriver import Chrome
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait



class TaobaoSpiderDownloaderMiddleware:
    """Downloader middleware that drives a real Chrome browser via Selenium.

    Taobao's anti-bot layer blocks plain HTTP clients, so this middleware
    renders the login page, the initial search, and each paginated results
    page inside a stealth-configured Chrome instance and hands the fully
    rendered HTML back to the spider as an ``HtmlResponse``.
    """

    def spider_opened(self, spider):
        """Launch a stealth-configured Chrome instance when the spider starts.

        Connected to the ``spider_opened`` signal in :meth:`from_crawler`.
        """
        # Pool of desktop User-Agent strings; one is picked at random per request.
        self.USER_AGENT_LIST = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]
        self.option = ChromeOptions()
        # Hide the "Chrome is being controlled by automated software" banner.
        self.option.add_experimental_option('excludeSwitches', ['enable-automation'])
        # Strip the Blink automation fingerprint used by bot-detection scripts.
        self.option.add_argument('--disable-blink-features=AutomationControlled')
        self.driver = Chrome(options=self.option)
        self.driver.maximize_window()
        # Mask navigator.webdriver before any page script runs, so basic
        # anti-automation checks see an ordinary browser.
        script = '''
        Object.defineProperty(navigator, 'webdriver', {
            get: () => undefined
        })
        '''
        self.driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {"source": script})
        spider.logger.info("Spider opened: %s" % spider.name)

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the middleware and wire signal handlers."""
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
        return s

    def _scroll_page(self, pause, total_height=6000, scroll_increment=500):
        """Scroll the live browser page down in fixed steps.

        Taobao lazy-loads result items, so we scroll ``scroll_increment``
        pixels at a time up to ``total_height``, sleeping ``pause`` seconds
        after each step to let content render.  (Fixes an off-by-one in the
        old inline loops, which stopped one step short of ``total_height``.)
        """
        num_scrolls = total_height // scroll_increment
        for step in range(1, num_scrolls + 1):
            self.driver.execute_script(f"window.scrollTo(0, {step * scroll_increment});")
            time.sleep(pause)  # wait for lazy-loaded content

    def process_request(self, request, spider):
        """Render login and paginated search pages in Chrome.

        Returns an ``HtmlResponse`` built from the rendered browser page for
        the login/search flow and for ``page=`` pagination URLs; returns
        ``None`` for every other request so Scrapy downloads it normally.
        """
        request.headers['User-Agent'] = random.choice(self.USER_AGENT_LIST)
        print('当前开始爬取的url：', request.url)
        if request.url == 'https://login.taobao.com/member/login.jhtml':
            self.driver.get(request.url)
            # Log in manually by scanning the QR code in the opened browser;
            # this sidesteps most of Taobao's automated-login detection.
            self.driver.implicitly_wait(20)
            try:
                # "Keep me signed in" confirmation button shown after login.
                support_click = self.driver.find_element(By.XPATH,
                                                         '//*[@id="login"]/div[2]/div/div[3]/div[1]/div/div/div[4]/button[2]')
            except Exception as e:
                # Button not present yet (user still scanning the QR code);
                # fall through and let Scrapy retry the request.
                print('错误指令：', e)
                print('等待指令中')
            else:
                support_click.click()
                cookie = self.driver.get_cookies()
                print('****cookie', cookie)
                request.cookies = cookie
                # Jump back to the Taobao home page to reach the search box.
                mainpage = self.driver.find_element(By.XPATH, '//*[@id="J_SiteNavHome"]/div/div/span')
                mainpage.click()
                self.driver.implicitly_wait(6)
                print('显示当前页面的url：', self.driver.current_url)
                SearchInput = self.driver.find_element(By.ID, 'q')
                SearchCondition = input('请输入你要爬取的相关内容：')
                SearchInput.send_keys(SearchCondition)
                # URL-encode the keyword so the spider can build page URLs.
                SearchCode = urllib.parse.quote(SearchCondition)
                spider.search_condition = SearchCode
                time.sleep(3)
                SearchClick = self.driver.find_element(By.XPATH, '//*[@id="J_TSearchForm"]/div[1]/button')
                SearchClick.click()
                print('***************正在进入当前搜索页面')
                time.sleep(5)
                self._scroll_page(pause=2)
                time.sleep(2)
                return HtmlResponse(url=self.driver.current_url, body=self.driver.page_source, encoding='utf-8')
        else:
            # Pagination request: render the next results page in Chrome.
            if 'page=' in request.url:
                print('开始翻页爬取：', request.url)
                self.driver.get(request.url)
                self.driver.refresh()
                cookie = self.driver.get_cookies()
                request.cookies = cookie
                self._scroll_page(pause=3)
                try:
                    WebDriverWait(self.driver, 10).until(
                        EC.presence_of_element_located((By.XPATH, '//*[@id="pageContent"]'))
                    )
                except TimeoutException:
                    print("页面加载超时")
                print('**************正在前往下一页')
                time.sleep(4)
                return HtmlResponse(url=self.driver.current_url, body=self.driver.page_source, encoding='utf-8')
            else:
                # Not a URL we render — let Scrapy download it normally.
                return None

    def process_response(self, request, response, spider):
        """Pass responses through unmodified."""
        return response

    def spider_closed(self, spider):
        """Shut down the Chrome instance when the spider finishes."""
        self.driver.quit()

# NOTE(review): everything below is dead code — an abandoned SMS-login +
# slider-captcha flow kept for reference. Consider removing it; version
# control preserves the history.
# isLogin = WebDriverWait(self.driver, 30).until(EC.url_changes(request.url))
# if isLogin:
#     print('登录成功')
# time.sleep(5)
# smsphone = self.driver.find_element(By.XPATH, '//*[@id="login"]/div[1]/div/div/a[2]')
# smsphone.click()
# time.sleep(4)
# # 定位到手机输入框
# phoneInput = self.driver.find_element(By.ID, 'fm-sms-login-id')
# phoneInput.send_keys('19959797519')
# time.sleep(4)
# get_code = self.driver.find_element(By.CLASS_NAME, 'send-btn-link')
# # 点击获取验证码
# get_code.click()
# time.sleep(5)
# try:
#     iframe = self.driver.find_element(By.ID, 'baxia-dialog-content')
#     self.driver.switch_to.frame(iframe)
#     WebDriverWait(self.driver, 6).until(EC.visibility_of_element_located((By.ID, 'nc_1_n1z')))
#     slide = self.driver.find_element(By.XPATH, '//*[@id="nc_1_n1z"]')
#     print('滑块的定位：', slide.location)
#     print('*************破解滑块中')
# except Exception as e:
#     print('滑块错误提示：', e)
#     print('等待指令中')
# else:
#     action = ActionChains(self.driver)
#     action.click_and_hold(slide).perform()
#     action.move_by_offset(250, 0).perform()
#     action.release().perform()
#     self.driver.implicitly_wait(5)
#     self.driver.switch_to.default_content()
#     # 输入验证码
#     code = input('请输入你获取到的手机验证码：')
#     # 定位验证码框
#     codeInput = self.driver.find_element(By.ID, 'fm-smscode')
#     codeInput.send_keys(code)
#     time.sleep(3)
#     # 点击登录
#     LoginButton = self.driver.find_element(By.CLASS_NAME, 'fm-button fm-submit sms-login')
#     LoginButton.click()
#     # 后续点击 是否要保持登录状态
#     # 不保持： //*[@id="login-form"]/div[6]/div[1]/div/div/div[4]/button[1]
#     WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located(
#         (By.XPATH, '//*[@id="login-form"]/div[6]/div[1]/div/div/div[4]/button[1]')))
#     noSupport = self.driver.find_element(By.XPATH,
#                                          '//*[@id="login-form"]/div[6]/div[1]/div/div/div[4]/button[1]')
#     noSupport.click()
