# -*- coding: utf-8 -*-
import os, logging
logger = logging.getLogger(__name__)

from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from scrapy_redis.spiders import RedisCrawlSpider
from selenium.common.exceptions import NoSuchElementException, NoSuchAttributeException
from selenium.webdriver.common.action_chains import ActionChains
from scrapy import signals
from selenium import webdriver
from scrapy.http import Request
from urllib import parse

from Spider.util.tool import get_md5_str, get_datetime
from Spider.items import JinDongItem


class JindongSpider(RedisCrawlSpider):
    """Crawl furniture listings from JD.com via scrapy-redis, rendering
    JS-only data (cover images, review counts) with a shared Chrome browser.

    CrawlSpider background (kept from the original notes):
    ``rules`` holds one or more ``Rule`` objects; when several rules match
    the same link, the first one declared wins. Each ``LinkExtractor``
    exposes ``extract_links(response)`` returning ``scrapy.link.Link``s:
        allow: URLs matching the regex(es) are extracted (empty == all)
        deny: URLs matching these regex(es) are never extracted
        allow_domains / deny_domains: domain whitelist / blacklist
        restrict_xpaths: xpath filter combined with ``allow``
    callback: method name invoked with each matched Response. Never use
        ``parse`` — CrawlSpider implements its own ``parse`` and overriding
        it breaks the spider.
    follow: whether to keep following links from the matched response;
        defaults to True when callback is None, otherwise False.
    process_links / process_request: optional filter hooks for the
        extracted link list / each generated request.

    Run locally (Windows):
    workon spider && D: && cd D:\workspace\oschina\Spider\Spider\spiders && scrapy runspider jindong.py

    Seed the redis start-url queue:
    c: && cd C:\Program Files\Redis && redis-cli.exe
    auth li1234redis
    lpush jindong:start_urls https://channel.jd.com/furniture.html
    """
    name = 'jindong'
    redis_key = 'jindong:start_urls'
    allowed_domains = ['www.jd.com', 'channel.jd.com', 'search.jd.com', 'item.jd.com']
    rules = (
        # Follow every furniture-category search link on the landing page.
        # Bug fix: '.' and '?' are now escaped. In the original pattern
        # r'https://search.jd.com/Search?keyword=/.*' the unescaped '?'
        # made the preceding 'h' optional and 'keyword=/' required a
        # literal slash, so real URLs (Search?keyword=...) never matched.
        Rule(
            LinkExtractor(allow=(r'https://search\.jd\.com/Search\?keyword=.*',)),
            callback='loadPage',
        ),
        # Item-detail links inside each category page (currently disabled).
        # Rule(
        #     LinkExtractor(
        #         allow=r'https://item.jd.com/\d+.html'
        #     ), callback='loadItem', follow=False,
        # ),
    )
    custom_settings = {
        # Spider-local log file and image store under the project root.
        "LOG_FILE": os.path.normpath(
            os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "logs/jindong.log")),
        "IMAGES_STORE": os.path.normpath(
            os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "media/images/jindong/")),
    }

    def __init__(self, *args, **kwargs):
        """Start the shared Chrome instance used for JS-rendered data."""
        # Chrome preference value 2 == block image loading (faster pages).
        self.prefs = {"profile.managed_default_content_settings.images": 2}
        self.driver_path = os.path.normpath(
            os.path.join(os.path.dirname(os.path.dirname(__file__)), "util/seleniumdriver/chromedriver.exe"))
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_experimental_option("prefs", self.prefs)
        # NOTE(review): executable_path/chrome_options is the Selenium 3
        # API; Selenium 4 expects Service(...)/options= — confirm the
        # installed selenium version before upgrading.
        self.browser = webdriver.Chrome(executable_path=self.driver_path, chrome_options=chrome_options)
        # Maximize so xpath-targeted elements are laid out consistently.
        self.browser.maximize_window()
        super(JindongSpider, self).__init__(*args, **kwargs)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider and hook browser shutdown to spider_closed."""
        spider = super(JindongSpider, cls).from_crawler(crawler, *args, **kwargs)
        # Quit the Chrome browser when the spider is closed.
        crawler.signals.connect(spider.spider_closed, signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        """Quit the shared Chrome browser on spider close.

        Bug fix: the original nested two logger.info calls
        (``logger.info(logger.info(...))``), logging the inner call's
        ``None`` return value a second time.
        """
        logger.info('JindongSpider-%s:爬虫关闭,关闭浏览器', spider.name)
        self.browser.quit()

    def loadPage(self, response):
        """Callback for category search pages (currently a stub)."""
        print(response.url)
        # yield Request(url=parse.urljoin(response.url, "http://www.baidu.com"))

    def loadItem(self, response):
        """Extract one furniture item from a JD product detail page.

        :param response: Response for an ``item.jd.com`` detail page
        :return: yields a populated :class:`JinDongItem`

        Dynamic data (enlarged cover images, review counts) must be read
        through the live selenium browser, not the static response.
        """
        # Title
        title = response.xpath('/html/body/div[7]/div/div[2]/div[1]/text()').extract_first("NULL").strip()
        # Market price
        market_price = response.xpath('/html/body/div[7]/div/div[2]/div[3]/div/div[1]/div[2]/span[1]/span[2]/text()').extract_first("NULL").strip()
        # Furniture category (sofa, bed, ...). Renamed from ``type`` so the
        # builtin is not shadowed.
        furniture_type = response.xpath('//div[@id="crumb-wrap"]/div/div[1]/div[5]/a/text()').extract_first("NULL").strip()
        # Link of the shop selling the item.
        # TODO(review): storeLink is extracted but never stored on the item.
        storeLink = response.xpath('//div[@id="popbox"]/div/div[1]/h3/a/@href').extract_first("NULL").strip()
        # Cover image URLs collected from the live DOM.
        images = []
        try:
            image_nodes = self.browser.find_elements_by_xpath('//*[@id="spec-list"]/ul/li')
            for image_node in image_nodes:
                # Click-and-hold the thumbnail so the enlarged image loads.
                ActionChains(self.browser).click_and_hold(image_node).perform()
                # Read the enlarged cover image from the rendered page.
                img = self.browser.find_element_by_xpath('//img[@id="spec-img"]')
                data_origin = img.get_attribute("data-origin")
                images.append(data_origin)
        except NoSuchElementException as e:
            logger.info('JindongSpider-loadItem方法获封面节点失败:{0}'.format(e))
        except NoSuchAttributeException as e:
            logger.info('JindongSpider-loadItem方法获取封面节点属性失败:{0}'.format(e))
        # Review counts. TODO(review): the evaluation values below are
        # extracted but not yet stored — add matching JinDongItem fields.
        try:
            evaluation_node = self.browser.find_element_by_xpath('//*[@id="detail"]/div[1]/ul/li[5]')
            # Click the reviews tab so the comment pane renders.
            ActionChains(self.browser).click(evaluation_node).perform()
            # Overall positive-rating percentage.
            evaluation1 = self.browser.find_element_by_xpath('//*[@id="comment"]/div[2]/div[1]/div[1]/div').text
            # li[1]=all, li[2]=with photos, li[3]=follow-up, li[4]=positive,
            # li[5]=neutral, li[6]=negative review counts.
            evaluation2, evaluation3, evaluation4, evaluation5, evaluation6, evaluation7 = [
                self.browser.find_element_by_xpath(
                    '//*[@id="comment"]/div[2]/div[2]/div[1]/ul/li[%d]/a/em' % idx).text
                for idx in range(1, 7)
            ]
        except NoSuchElementException as e:
            logger.info('JindongSpider-loadItem方法获商品评价节点失败:{0}'.format(e))
        except NoSuchAttributeException as e:
            logger.info('JindongSpider-loadItem方法获商品评价节点属性失败:{0}'.format(e))

        item = JinDongItem()
        item['url'] = response.url
        item['title'] = title
        # Bug fix: store the extracted category instead of the hard-coded
        # "类型待定" ("type TBD") placeholder.
        item['type'] = furniture_type
        item['market_price'] = market_price
        item['images'] = images
        item['push_date'] = get_datetime()
        # Bug fix: the original yielded None, silently discarding the item.
        yield item
