# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
from scrapy.utils.project import get_project_settings
from scrapy.http import HtmlResponse
import asyncio, time
import pyppeteer
from pyppeteer import launch
import random

pyppeteer.DEBUG = False


class TaobaoSpiderMiddleware(object):
    """Pass-through spider middleware kept from the Scrapy project template.

    Every hook forwards its input unchanged; the only observable behaviour
    is a log line when the spider opens.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware and subscribe to the spider_opened signal."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Accept every response unchanged (None = continue processing)."""
        return None

    def process_spider_output(self, response, result, spider):
        """Forward everything the spider produced, unmodified."""
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """Defer to default exception handling (None = no override)."""
        return None

    def process_start_requests(self, start_requests, spider):
        """Emit the spider's start requests exactly as given."""
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class TaobaoDownloaderMiddleware(object):
    """No-op downloader middleware kept from the Scrapy project template.

    Requests and responses pass straight through; the only observable
    behaviour is a log line when the spider opens.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware and subscribe to the spider_opened signal."""
        instance = cls()
        crawler.signals.connect(instance.spider_opened, signal=signals.spider_opened)
        return instance

    def process_request(self, request, spider):
        """Return None so the request is downloaded normally."""
        return None

    def process_response(self, request, response, spider):
        """Hand the downloaded response back untouched."""
        return response

    def process_exception(self, request, exception, spider):
        """Defer to other middlewares / default handling (None = continue)."""
        return None

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


# class SeleniumMiddleware(object):
#     def __init__(self):
#         self.logger = getLogger(__name__)
#         # self.options = webdriver.ChromeOptions()
#         # self.options.add_argument('--headless')
#         # self.options.add_argument('--disable-gpu')
#         # self.browser = webdriver.Chrome(chrome_options=self.options)
#         self.browser = webdriver.Firefox()
#         self.browser.set_window_size(1400, 700)
#
#     # def __del__(self):
#     #     """
#     #     用于关闭浏览器
#     #     :return:
#     #     """
#     #     self.browser.close()
#
#     def process_request(self, request, spider):
#         """
#         用chrome抓取页面
#         :param request: Request对象
#         :param spider: Spider对象
#         :return: HtmlResponse
#         """
#         self.logger.debug('Chrome is Starting')
#         self.browser.get(request.url)
#         content = self.browser.page_source.encode('utf-8')
#         if content.strip() != '':
#             return HtmlResponse(request.url, encoding='utf-8', body=content, request=request)
#         return None
#
#     @classmethod
#     def from_crawler(cls, crawler):
#         # This method is used by Scrapy to create your spiders.
#         # Scrapy使用此方法创建您的蜘蛛。
#         return cls()


class ChromeMiddleware(object):
    """Downloader middleware that renders Taobao pages in a real Chromium
    browser via pyppeteer, logging in and refreshing cookies when the
    session expires, and snapshotting every product-category variant.

    NOTE(review): account credentials are hard-coded in ``login`` — they
    should be moved to project settings / environment variables.
    """

    def __init__(self):
        # Desktop UA plus a navigator.webdriver override so Taobao's bot
        # detection does not immediately flag the automated browser.
        self.user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36"
        self.eval = "() =>{ Object.defineProperties(navigator,{ webdriver:{ get: () => false } }) }"
        self.login_url = "https://login.taobao.com/member/login.jhtml?spm=a21bo.2017.754894437.1.5af911d99dTxv8&f=top&redirectURL=https%3A%2F%2Fwww.taobao.com%2F"
        self.setting = get_project_settings()
        # A page title containing this keyword means we were bounced to the
        # "爱淘宝" interstitial, i.e. the cached session is invalid.
        self.key_word = "爱淘宝"
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.__get_browser())

    async def __get_browser(self):
        # headless=False: Taobao detects plain headless Chromium.
        self.browser = await launch(headless=False)

    def process_request(self, request, spider):
        """Render request.url in the browser and return the HTML wrapped in
        an HtmlResponse, short-circuiting Scrapy's own downloader."""
        loop = asyncio.get_event_loop()
        body = loop.run_until_complete(self.get_page(request.url))
        return HtmlResponse(url=request.url, body=body, encoding="utf-8", request=request)

    def __del__(self):
        """Close the browser when the middleware is garbage collected.

        NOTE(review): assumes the event loop is still open and not running;
        during interpreter shutdown this may fail — consider hooking the
        spider_closed signal instead.
        """
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.__close_browser())

    async def __close_browser(self):
        """Shut down the shared Chromium instance."""
        await self.browser.close()

    @classmethod
    async def try_validation(cls, page, distance=308):
        """Drag the slider captcha across *distance* pixels.

        The drag is split into a long and a short segment with pauses to
        mimic a human hand.

        :param page: pyppeteer page showing the captcha
        :param distance: total horizontal drag distance in pixels
        """
        distance1 = distance - 10
        distance2 = 10
        # Locate the slider handle (#nc_1_n1z) in viewport coordinates.
        btn_position = await page.evaluate('''
           () =>{
            return {
             x: document.querySelector('#nc_1_n1z').getBoundingClientRect().x,
             y: document.querySelector('#nc_1_n1z').getBoundingClientRect().y,
             width: document.querySelector('#nc_1_n1z').getBoundingClientRect().width,
             height: document.querySelector('#nc_1_n1z').getBoundingClientRect().height
             }}
            ''')
        x = btn_position['x'] + btn_position['width'] / 2
        y = btn_position['y'] + btn_position['height'] / 2
        await page.mouse.move(x, y)
        await page.mouse.down()
        await page.mouse.move(x + distance1, y, {'steps': 30})
        await page.waitFor(800)
        await page.mouse.move(x + distance1 + distance2, y, {'steps': 20})
        await page.waitFor(800)
        await page.mouse.up()

    async def login(self, browser):
        """Log in to Taobao and cache fresh cookies in the settings.

        Opens a dedicated login tab, switches the QR-code form to the
        username/password form, types the credentials, solves the slider
        captcha if one appears, then stores the cookies under 'COOKIE'.

        :param browser: the shared pyppeteer browser
        """
        login = await browser.newPage()
        await login.setUserAgent(self.user_agent)
        await login.evaluateOnNewDocument(self.eval)
        await login.goto(self.login_url)
        try:
            # Switch from the QR-code login to the static password form.
            await login.click("#J_QRCodeLogin > div.login-links > a.forget-pwd.J_Quick2Static")
            await login.type("#TPL_username_1", "zrz_work@163.com", {'delay': 50})
            await login.type("#TPL_password_1", "YYan.1026", {'delay': 50})
        except Exception as e:
            # Page sometimes loads directly in password mode; fall back to
            # the secondary account.
            print(e)
            await login.type("#TPL_username_1", "17630903862", {'delay': 50})
            await login.type("#TPL_password_1", "YYan.1026", {'delay': 50})

        # BUG FIX: was time.sleep(2), which blocks the whole event loop.
        await asyncio.sleep(2)
        slider = await login.Jeval('#nocaptcha', 'node => node.style')
        if slider:
            print("有滑块")
            await login.waitFor(1000)
            # BUG FIX: the original `slider[0].click()` crashed — Jeval
            # returns a serialized style object, not element handles.
            # try_validation grabs the handle itself, so no click is needed.
            await self.try_validation(login)
        else:
            await login.click("#J_SubmitStatic")
        # BUG FIX: the original saved cookies and closed the page inside the
        # slider branch, then fell through and used the closed page again.
        # Wait for the post-login redirect, then persist cookies exactly once.
        await asyncio.sleep(5)
        cookie = await login.cookies()
        self.setting.set('COOKIE', cookie)
        await login.close()
        # BUG FIX: do NOT close the browser here — it is self.browser and is
        # still needed by get_page's retry after a successful login.

    async def get_page(self, url):
        """Fetch *url* in a fresh tab and return the concatenated HTML.

        If the title shows we were redirected to the "爱淘宝" interstitial
        (invalid session), log in first and retry.

        :param url: product page URL
        :return: HTML string with one <customSegmentation> block per
                 category variant (see category_processing)
        """
        page = await self.browser.newPage()
        await page.setUserAgent(self.user_agent)
        await page.evaluateOnNewDocument(self.eval)
        cookie = self.setting.get('COOKIE')
        # Replay cached session cookies, if any, before navigating.
        if cookie:
            for c in cookie:
                await page.setCookie(c)
        await page.goto(url)
        title = await page.title()
        if self.key_word in title:
            # Session expired: close this tab, re-login, fetch again.
            # BUG FIX: the original discarded the recursive result, so the
            # middleware built an HtmlResponse with body=None.
            await page.close()
            await self.login(self.browser)
            return await self.get_page(url)
        # Each category row is a .J_Prop element; inside it every variant is
        # an <li> under ul.J_TSaleProp. Click through all combinations and
        # concatenate a snapshot of each page state.
        big_sort = await page.JJ('.J_Prop')
        content = await self.category_processing(big_sort, '', page)
        await asyncio.sleep(random.randint(2, 4))
        await page.close()
        return content

    async def category_processing(self, big_sort, content, page):
        """Snapshot the page for every category combination.

        :param big_sort: element handles for the .J_Prop category rows
        :param content: accumulator string the snapshots are appended to
        :param page: pyppeteer page showing the product
        :return: content plus one <customSegmentation> block per variant
        """
        if len(big_sort) > 1:
            # Two rows (e.g. colour x size): iterate the first row and, for
            # each choice, click through the in-stock options of the second.
            sort1 = await page.JJ('.tb-skin .J_Prop:nth-child(1) .J_TSaleProp li')
            for option in sort1:
                await option.click()
                sort2 = await page.JJ('.tb-skin .J_Prop:nth-child(2) .J_TSaleProp li:not(.tb-out-of-stock)')
                await asyncio.sleep(random.randint(2, 4))
                content = await self.click_category(content, page, sort2)
                # Reset the second row before the next first-row option.
                await page.click('.tb-skin .J_Prop:nth-child(2) .J_TSaleProp li:nth-child(3)')
                await asyncio.sleep(random.randint(2, 4))
        else:
            # BUG FIX: the original had `elif len(big_sort) <= 1` followed by
            # an `else` that was unreachable; collapsed into one branch.
            sort = await page.JJ('.J_TSaleProp li')
            if len(sort) > 1:
                content = await self.click_category(content, page, sort)
            else:
                # Single variant (or none): snapshot the page once.
                content += '<customSegmentation>\n'
                content += await page.content()
                content += '</customSegmentation>\n'
        return content

    @staticmethod
    async def click_category(content, page, sort):
        """Click each variant in *sort*, appending a snapshot per click.

        :param content: accumulated HTML so far
        :param page: pyppeteer page showing the product
        :param sort: element handles of the variant <li> nodes to click
        :return: content plus one <customSegmentation> block per variant
        """
        for item in sort:
            content += '<customSegmentation>\n'
            await item.click()
            content += await page.content()
            content += '</customSegmentation>\n'
            # Randomized pause to look less like a bot.
            await asyncio.sleep(random.randint(2, 4))
        return content

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy uses this factory to instantiate the middleware.
        return cls()

# class FundscrapyDownloaderMiddleware(object):
#     # Not all methods need to be defined. If a method is not defined,
#     # scrapy acts as if the downloader middleware does not modify the
#     # passed objects.
#     def __init__(self):
#         loop = asyncio.get_event_loop()
#         task = asyncio.ensure_future(self.getbrowser())
#         loop.run_until_complete(task)
#         # self.browser = task.result()
#         print(self.browser)
#         print(self.page)
#         # self.page = await browser.newPage()
#
#     async def getbrowser(self):
#         self.browser = await launch()
#         self.page = await self.browser.newPage()
#         # return await pyppeteer.launch()
#
#     async def getnewpage(self):
#         return await self.browser.newPage()
#
#     @classmethod
#     def from_crawler(cls, crawler):
#         # This method is used by Scrapy to create your spiders.
#         s = cls()
#         crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
#         return s
#
#     def process_request(self, request, spider):
#         # Called for each request that goes through the downloader
#         # middleware.
#
#         # Must either:
#         # - return None: continue processing this request
#         # - or return a Response object
#         # - or return a Request object
#         # - or raise IgnoreRequest: process_exception() methods of
#         #   installed downloader middleware will be called
#         loop = asyncio.get_event_loop()
#         task = asyncio.ensure_future(self.usePypuppeteer(request))
#         loop.run_until_complete(task)
#         # return task.result()
#         return HtmlResponse(url=request.url, body=task.result(), encoding="utf-8", request=request)
#
#     async def usePypuppeteer(self, request):
#         print(request.url)
#         # page = await self.browser.newPage()
#         await self.page.goto(request.url)
#         content = await self.page.content()
#         return content
#
#     def process_response(self, request, response, spider):
#         # Called with the response returned from the downloader.
#
#         # Must either;
#         # - return a Response object
#         # - return a Request object
#         # - or raise IgnoreRequest
#         return response
#
#     def process_exception(self, request, exception, spider):
#         # Called when a download handler or a process_request()
#         # (from other downloader middleware) raises an exception.
#
#         # Must either:
#         # - return None: continue processing this exception
#         # - return a Response object: stops process_exception() chain
#         # - return a Request object: stops process_exception() chain
#         pass
#
#     def spider_opened(self, spider):
#         spider.logger.info('Spider opened: %s' % spider.name)
