# -*- coding: utf-8 -*-
import random, os, time, logging, json
logger = logging.getLogger(__name__)

from scrapy import signals
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import TimeoutException, NoSuchElementException, NoSuchAttributeException
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from scrapy import signals
from fake_useragent import UserAgent
from scrapy.http import HtmlResponse
from selenium.webdriver.common.keys import Keys


# class SpiderSpiderMiddleware(object):
#     # Not all methods need to be defined. If a method is not defined,
#     # scrapy acts as if the spider middleware does not modify the
#     # passed objects.
#
#     @classmethod
#     def from_crawler(cls, crawler):
#         # This method is used by Scrapy to create your spiders.
#         s = cls()
#         crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
#         return s
#
#     def process_spider_input(self, response, spider):
#         # Called for each response that goes through the spider
#         # middleware and into the spider.
#
#         # Should return None or raise an exception.
#         return None
#
#     def process_spider_output(self, response, result, spider):
#         # Called with the results returned from the Spider, after
#         # it has processed the response.
#
#         # Must return an iterable of Request, dict or Item objects.
#         for i in result:
#             yield i
#
#     def process_spider_exception(self, response, exception, spider):
#         # Called when a spider or process_spider_input() method
#         # (from other spider middleware) raises an exception.
#
#         # Should return either None or an iterable of Response, dict
#         # or Item objects.
#         pass
#
#     def process_start_requests(self, start_requests, spider):
#         # Called with the start requests of the spider, and works
#         # similarly to the process_spider_output() method, except
#         # that it doesn’t have a response associated.
#
#         # Must return only requests (not items).
#         for r in start_requests:
#             yield r
#
#     def spider_opened(self, spider):
#         spider.logger.info('Spider opened: %s' % spider.name)


class RandomUserAgentDownloadMiddleware(object):
    """
    Downloader middleware that sets a randomized User-Agent header on every
    outgoing request via the third-party ``fake-useragent`` package.

    The ``UserAgent`` attribute to draw from (e.g. ``random``, ``chrome``,
    ``firefox``) is read from the ``RANDOM_USER_AGENT_TYPE`` setting and
    defaults to ``"random"``.
    """
    def __init__(self, crawler):
        super(RandomUserAgentDownloadMiddleware, self).__init__()
        # UserAgent() maintains a cached database of real browser UA strings.
        self.ua = UserAgent()
        # Which UA pool to use; "random" picks across all browser families.
        self.ua_type = crawler.settings.get("RANDOM_USER_AGENT_TYPE", "random")

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: pass the crawler in so settings are reachable.
        return cls(crawler)

    def process_request(self, request, spider):
        # getattr(self.ua, "random") etc. yields a fresh UA string per call.
        # setdefault: only set the header if the request lacks one already.
        request.headers.setdefault('User-Agent', getattr(self.ua, self.ua_type))


class RandomIpProxyDownloadMiddleware(object):
    """
    Placeholder downloader middleware for rotating proxy IPs.

    Currently a no-op: returning ``None`` tells Scrapy to keep processing
    the request through the remaining middlewares unchanged.
    """
    def process_request(self, request, spider):
        # To enable a proxy, assign it on the request meta, e.g.:
        # request.meta["proxy"] = "http://36.25.34.0:808"
        return None


class SeleniumChromeDownloadMiddleware(object):
    """
    Downloader middleware that intercepts Scrapy's built-in downloader and
    renders matching pages with the spider's Selenium Chrome instance.

    Requests whose URL contains any of the substrings listed in
    ``spider.chromeUrl`` are loaded in the browser; scraped fields are
    collected into a dict and handed back to the spider as a JSON-encoded
    ``HtmlResponse`` body.  All other requests fall through (``None`` is
    returned) and are downloaded by Scrapy normally.

    Assumes the spider exposes ``spider.chrome`` (a selenium WebDriver) and
    ``spider.chromeUrl`` (a list of URL substrings) — TODO confirm against
    the spider implementation.
    """
    def process_request(self, request, spider):
        # Guard clause: URLs not matching any configured substring are left
        # to Scrapy's own downloader.  Example image URLs that bypass
        # Selenium this way:
        # https://gd2.alicdn.com/imgextra/i4/881466827/TB2dwNUlY_I8KJjy1XaXXbsxpXa_!!881466827.jpg_400x400.jpg_.webp
        # https://gd2.alicdn.com/imgextra/i3/752356878/TB2LeDTm_vI8KJjSspjXXcgjXXa_!!752356878.jpg_400x400.jpg_.webp
        # https://gd1.alicdn.com/imgextra/i1/3310658323/TB2Lu3WmTTI8KJjSsphXXcFppXa_!!3310658323.jpg_400x400.jpg_.webp
        if not any(marker in request.url for marker in spider.chromeUrl):
            return None

        # Render the page with Selenium; page_source is the default body
        # unless one of the parsers below replaces it with JSON.
        spider.chrome.get(request.url)
        body = spider.chrome.page_source
        result = {}

        if "www.taobao.com" in request.url:
            # --- Taobao home page: extract the furniture-menu link ---
            try:
                munu_jiaju_href = spider.chrome.find_element(By.XPATH, '/html/body/div[4]/div[1]/div[1]/div[1]/div/ul/li[12]/a[1]').get_attribute("href")
                if "s.taobao.com" not in munu_jiaju_href:
                    munu_jiaju_href = None  # discard links outside the search domain
                result["munu_jiaju_href"] = munu_jiaju_href
            except NoSuchElementException as e:
                logger.info('浏览淘宝首页页面信息-NoSuchElementException-【{0}】-【{1}】'.format(e, request.url))
            except NoSuchAttributeException as e:
                logger.info('浏览淘宝首页页面信息-NoSuchAttributeException-【{0}】-【{1}】'.format(e, request.url))
            finally:
                # Always hand the (possibly partial) result back as JSON.
                body = json.dumps(result, ensure_ascii=False)
        else:
            # Scroll down incrementally so lazily-loaded content renders.
            for offset in range(100, 1050, 50):
                spider.chrome.execute_script('window.scrollBy(0,{0});'.format(offset))

            if "s.taobao.com" in request.url:
                # --- Search-result (listing) page ---
                # Collect item links from the result grid.
                try:
                    jiaju_hrefs = []
                    jiajus = spider.chrome.find_elements(By.XPATH,
                        '//*[@id="minisrp-itemlist"]/div[@class="m-itemlist"]/div[@class="grid g-clearfix"]/div[@class="items"]/div')
                    for jiaju in jiajus:
                        href = jiaju.find_element(By.XPATH, 'div/div/div/a').get_attribute("href")
                        # Keep only links that point at item/ad detail pages.
                        if "item.taobao.com" in href or "click.simba.taobao.com" in href:
                            jiaju_hrefs.append(href)
                except NoSuchElementException as e:
                    logger.info('浏览淘宝家具分页页面信息-NoSuchElementException-【{0}】-【{1}】'.format(e, request.url))
                except NoSuchAttributeException as e:
                    logger.info('浏览淘宝家具分页页面信息-NoSuchAttributeException-【{0}】-【{1}】'.format(e, request.url))
                finally:
                    result["jiaju_hrefs"] = jiaju_hrefs
                # Pagination: current page number and the next-page link.
                try:
                    current_pageButton = spider.chrome.find_element(By.XPATH,
                        '//*[@id="minisrp-pager"]/div/div/div/ul/li[@class="item active"]')
                    result["current_pageNum"] = current_pageButton.text
                    next_pageButton = current_pageButton.find_element(By.XPATH, 'following-sibling::li[1]')
                    # Trigger the page's JS handlers so the href is populated.
                    ActionChains(spider.chrome).context_click(next_pageButton).click(current_pageButton).perform()
                    next_pageButtonHref = next_pageButton.find_element(By.XPATH, 'a').get_attribute("href")
                    if "s.taobao.com" in next_pageButtonHref:  # validate the pagination link
                        result["next_pageButtonHref"] = next_pageButtonHref
                except NoSuchElementException as e:
                    logger.info('浏览淘宝家具分页页面信息(分页信息)-NoSuchElementException-【{0}】-【{1}】'.format(e, request.url))
                except NoSuchAttributeException as e:
                    logger.info('浏览淘宝家具分页页面信息(分页信息)-NoSuchAttributeException-【{0}】-【{1}】'.format(e, request.url))
                finally:
                    body = json.dumps(result, ensure_ascii=False)

            if "item.taobao.com" in request.url or "click.simba.taobao.com" in request.url:
                # --- Item detail page ---
                result["url"] = request.url  # item page link
                result["current_pageNum"] = request.meta.get("current_pageNum", None)  # listing page this came from
                result["sequence"] = request.meta.get("sequence", None)  # position within that listing page
                # Item title.  Default to "" so the keyword mapping in the
                # finally block cannot raise NameError when lookup fails
                # (the original code crashed here on a missing title).
                title = ""
                try:
                    title = spider.chrome.find_element(By.XPATH, '//*[@id="J_Title"]/h3').text
                    result["title"] = title
                except NoSuchElementException as e:
                    logger.info('浏览淘宝家具分页页面信息(获取家具商品的标题)-NoSuchElementException-【{0}】-【{1}】'.format(e, request.url))
                except NoSuchAttributeException as e:
                    logger.info('浏览淘宝家具分页页面信息(获取家具商品的标题)-NoSuchAttributeException-【{0}】-【{1}】'.format(e, request.url))
                finally:
                    # Coarse keyword -> furniture-category mapping; later
                    # matches overwrite earlier ones (original order kept).
                    specie = "家具类型获取失败"
                    if "沙发" in title:
                        specie = "沙发"
                    if "床" in title:
                        specie = "床"
                    if "柜" in title:
                        specie = "柜子"
                    if "茶" in title:
                        specie = "茶几"
                    if "桌" in title:
                        specie = "桌子"
                    if "椅" in title:
                        specie = "椅子"
                    if "书架" in title:
                        specie = "架子"
                    result["specie"] = specie
                # Price: try the promo price first, fall back to list price.
                try:
                    price = spider.chrome.find_element(By.XPATH, '//*[@id="J_PromoPriceNum"]')
                    result["price"] = price.text
                except NoSuchElementException:
                    try:
                        price = spider.chrome.find_element(By.XPATH, '//*[@id="J_StrPrice"]/em[2]')
                        result["price"] = price.text
                    except NoSuchElementException as e:
                        logger.info('浏览淘宝家具分页页面信息(获取家具商品的价格)-NoSuchElementException-【{0}】-【{1}】'.format(e, request.url))
                # First product image URL (extraction currently disabled).
                try:
                    image_urls = []
                    # image_nodes = spider.chrome.find_elements(By.XPATH, '//*[@id="J_UlThumb"]/li[not(@id="J_VideoThumb")]')
                    # for image_node in image_nodes:
                    #     ActionChains(spider.chrome).move_to_element(image_node).perform()  # JS hover event
                    #     break
                    # image_url = spider.chrome.find_element(By.ID, 'J_ImgBooth').get_attribute("src").replace("_400x400.jpg_.webp", "")  # cover image link
                    # if "alicdn.com" in image_url:
                    #     image_urls.append(image_url)
                except NoSuchElementException as e:
                    logger.info('浏览淘宝家具详情页面信息(获取首张图片链接)-NoSuchElementException-【{0}】-【{1}】'.format(e, request.url))
                except NoSuchAttributeException as e:
                    logger.info('浏览淘宝家具详情页面信息(获取首张图片链接)-NoSuchAttributeException-【{0}】-【{1}】'.format(e, request.url))
                finally:
                    result["image_urls"] = image_urls
                # Review tab: cumulative count, then click it open so the
                # per-category counters below can load.
                try:
                    evaluation_node = spider.chrome.find_element(By.XPATH, '//*[@id="J_TabBar"]/li[2]/a/em')
                    result["evaluation_cumulative"] = evaluation_node.text
                    ActionChains(spider.chrome).click(evaluation_node).perform()  # JS click
                except NoSuchElementException as e:
                    logger.info('浏览淘宝家具详情页面信息(获取累计评论数量)-NoSuchElementException-【{0}】-【{1}】'.format(e, request.url))
                finally:
                    # Each counter renders as "(123)"; strip the parentheses.
                    # Follow-up reviews
                    try:
                        evaluation_additional = WebDriverWait(spider.chrome, 10).until(
                            EC.presence_of_element_located((By.XPATH,
                                '//*[@id="reviews"]/div/div/div/div/div/div[1]/div/ul/li[3]/label/span/span'))
                        ).text.lstrip("(").rstrip(")")
                        result["evaluation_additional"] = evaluation_additional
                    except TimeoutException as e:
                        # spider.chrome.execute_script('window.stop()')
                        logger.info('浏览淘宝家具详情页面信息(追加评论)-TimeoutException-【{0}】-【{1}】'.format(e, request.url))
                    # Positive reviews
                    try:
                        evaluation_good = WebDriverWait(spider.chrome, 10).until(
                            EC.presence_of_element_located((By.XPATH,
                                '//*[@id="reviews"]/div/div/div/div/div/div[1]/div/ul/li[4]/label/span/span[1]'))
                        ).text.lstrip("(").rstrip(")")
                        result["evaluation_good"] = evaluation_good
                    except TimeoutException as e:
                        logger.info('浏览淘宝家具详情页面信息(好评)-TimeoutException-【{0}】-【{1}】'.format(e, request.url))
                    # Neutral reviews (log label fixed: was mislabelled 好评)
                    try:
                        evaluation_medium = WebDriverWait(spider.chrome, 10).until(
                            EC.presence_of_element_located((By.XPATH,
                                '//*[@id="reviews"]/div/div/div/div/div/div[1]/div/ul/li[5]/label/span/span'))
                        ).text.lstrip("(").rstrip(")")
                        result["evaluation_medium"] = evaluation_medium
                    except TimeoutException as e:
                        logger.info('浏览淘宝家具详情页面信息(中评)-TimeoutException-【{0}】-【{1}】'.format(e, request.url))
                    # Negative reviews (log label fixed: was mislabelled 好评)
                    try:
                        evaluation_poor = WebDriverWait(spider.chrome, 10).until(
                            EC.presence_of_element_located((By.XPATH,
                                '//*[@id="reviews"]/div/div/div/div/div/div[1]/div/ul/li[6]/label/span/span'))
                        ).text.lstrip("(").rstrip(")")
                        result["evaluation_poor"] = evaluation_poor
                    except TimeoutException as e:
                        logger.info('浏览淘宝家具详情页面信息(差评)-TimeoutException-【{0}】-【{1}】'.format(e, request.url))
                    body = json.dumps(result, ensure_ascii=False)

        return HtmlResponse(url=spider.chrome.current_url, body=body, encoding="utf-8", request=request)



