import os

from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException,NoSuchAttributeException
from urllib import parse
from scrapy import signals,Selector
from scrapy_redis.spiders import RedisSpider
from scrapy.http import Request
from TaoBaoSpider.items import TaobaospiderItem
from TaoBaoSpider.util.tool import get_md5_str,get_datetime
import logging
logger = logging.getLogger(__name__)


class TaoBaoJiaJuSpider(RedisSpider):
    """Distributed Taobao furniture spider driven by scrapy-redis.

    Start URLs are consumed from the Redis list named by ``redis_key``.
    Seed the crawl from a Redis CLI, e.g. on Windows::

        c: && cd C:\\Program Files\\Redis && redis-cli.exe
        auth li1234redis
        lpush taobao_jiaju:start_urls https://www.taobao.com/

    A Selenium-driven Chrome instance is kept alongside the Scrapy
    downloader because the listing pagination is rendered by JavaScript
    and is not present in the raw HTML response.
    """
    name = 'taobao_jiaju'
    allowed_domains = ['www.taobao.com', 's.taobao.com', 'item.taobao.com', 'g-search1.alicdn.com', 'click.simba.taobao.com']
    redis_key = 'taobao_jiaju:start_urls'
    custom_settings = {
        "LOG_FILE": os.path.normpath(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "logs/taobao_jiaju.log")),
        "IMAGES_STORE": os.path.normpath(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "media/images/taobao_jiaju/")),
    }

    def __init__(self, **kwargs):
        # Disable image loading in Chrome (2 == block) to speed up rendering.
        self.prefs = {"profile.managed_default_content_settings.images": 2}
        self.driver_path = os.path.normpath(os.path.join(os.path.dirname(os.path.dirname(__file__)), "util/seleniumdriver/chromedriver.exe"))
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_experimental_option("prefs", self.prefs)
        # Launch the Chrome browser used to read the JS-rendered pagination.
        self.browser = webdriver.Chrome(executable_path=self.driver_path, chrome_options=chrome_options)
        # Maximize so page elements are laid out (and clickable) consistently.
        self.browser.maximize_window()
        # BUG FIX: forward **kwargs so spider arguments (e.g. -a name=value)
        # reach the RedisSpider/Spider base class; they were silently dropped.
        super(TaoBaoJiaJuSpider, self).__init__(**kwargs)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider and hook ``spider_closed`` so the Chrome browser
        is shut down together with the spider."""
        spider = super(TaoBaoJiaJuSpider, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_closed, signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        """Signal handler: quit the Chrome browser when the spider closes."""
        # BUG FIX: was logger.info(logger.info(...)), which logged the message
        # once and then logged the outer call's argument (None) a second time.
        logger.info('%s:爬虫关闭,关闭浏览器', spider.name)
        self.browser.quit()

    def parse(self, response):
        """Route responses.

        * Homepage -> follow the furniture category entrance link.
        * Listing page -> yield one detail request per item; the next-page
          link is looked up via Selenium (JS-rendered) but following it is
          currently disabled (see NOTE at the bottom).
        """
        if response.url == "https://www.taobao.com/":
            entrance_path = '/html/body/div[4]/div[1]/div[1]/div[1]/div/ul/li[12]/a[1]/@href'
            try:
                entrance_href = response.xpath(entrance_path).extract_first()
                if entrance_href:
                    yield Request(url=parse.urljoin(response.url, entrance_href), callback=self.parse)
                else:
                    logger.info('初始获取href属性值失败')
            except Exception as e:
                # Broad catch is deliberate: a layout change must not kill the
                # crawl, only be reported so the XPath can be adjusted.
                logger.info('初始url化规则发生改变,请重新调整entrance_href的xpath值')
        else:
            current_page_xpath = '//div[@id="minisrp-pager"]/div/div/div/ul/li[@class="item active"]/span/text()'
            # extract_first returns a *string* page number on success, or the
            # int -1 default when the pager is missing.
            current_page = response.xpath(current_page_xpath).extract_first(-1)
            # BUG FIX: the except handlers below used `current_page + 1`,
            # which raises TypeError when current_page is a string; compute a
            # safe integer next-page number up front instead.
            try:
                next_page_no = int(current_page) + 1
            except (TypeError, ValueError):
                next_page_no = -1
            # Next-page link is rendered lazily by JS, so read it via Selenium.
            next_page_xpath = '//div[@id="minisrp-pager"]/div/div/div/ul/li[@class="item active"]/following-sibling::li[1]'
            next_page_li_a_xpath = '//div[@id="minisrp-pager"]/div/div/div/ul/li[@class="item active"]/following-sibling::li[1]/a'
            try:
                next_page_li = self.browser.find_element_by_xpath(next_page_xpath)
                # Context-click forces the lazy <a> element to materialize.
                ActionChains(self.browser).context_click(next_page_li).perform()
                next_page_li_a = self.browser.find_element_by_xpath(next_page_li_a_xpath)
                next_page_li_a_href = next_page_li_a.get_attribute("href")
            except NoSuchElementException as e:
                logger.info('解析第【{0}】页下标签（li或a）失败'.format(next_page_no))
                next_page_li_a_href = None
            except NoSuchAttributeException as e:
                logger.info('解析第【{0}】页下标签（a）的链接(href)失败'.format(next_page_no))
                next_page_li_a_href = None
            # Iterate every furniture item on the current listing page.
            items_xpath = '//*[@id="minisrp-itemlist"]/div[@class="m-itemlist"]/div[@class="grid g-clearfix"]/div[@class="items"]/div'
            items = response.xpath(items_xpath)
            for i in range(1, (len(items) + 1)):
                a = response.xpath(items_xpath + '[{0}]/div[1]/div[1]/div[1]/a[1]'.format(i))
                if a:
                    href = a.xpath('@href').extract_first()
                    if href:
                        yield Request(url=parse.urljoin(response.url, href), callback=self.jiaju_detail,
                                      meta={"current_page": current_page, "index": i})
                    else:
                        logger.info('解析第【{0}】页下第{1}个商品的标签的链接（href）失败'.format(current_page, i))
                else:
                    logger.info('解析第【{0}】页下第{1}个商品的标签（a）失败'.format(current_page, i))
            # NOTE(review): next-page following is intentionally disabled;
            # next_page_li_a_href is computed above so re-enabling is one edit.
            # if next_page_li_a_href:
            #     yield Request(url=parse.urljoin(response.url, next_page_li_a_href), callback=self.parse)

    def jiaju_detail(self, response):
        """Parse one furniture product page and yield a TaobaospiderItem.

        Missing fields fall back to Chinese "extraction failed" markers so
        downstream storage can distinguish absent data from parse errors.
        """
        current_page = response.meta.get("current_page", "-1")
        index = response.meta.get("index", "-1")
        url = response.url
        logger.info('解析第【{0}】页下第{1}条: {2}'.format(current_page, index, url))
        # MD5 of the URL serves as a stable dedup key for storage/export.
        url_object_id = get_md5_str(url)
        image_node = response.xpath('//*[@id="J_ImgBooth"]/@src')
        if image_node:
            image = parse.urljoin(response.url, image_node.extract_first(None))
        else:
            image = None
        title_node = response.xpath('//div[@id="J_Title"]/h3/@data-title')
        if title_node:
            title = title_node.extract_first("标题获取失败1")
        else:
            title = "标题获取失败1"
        market_price_node = response.xpath('//*[@id="J_StrPrice"]/em[2]/text()')
        if market_price_node:
            market_price = market_price_node.extract_first("市场价格获取失败1")
        else:
            market_price = "市场价格获取失败1"
        taobao_price_node = response.xpath('//*[@id="J_PromoPriceNum"]/text()')
        if taobao_price_node:
            taobao_price = taobao_price_node.extract_first("淘宝价格获取失败1")
        else:
            taobao_price = "淘宝价格获取失败1"

        """
        插入mysql,magondb,导出excel url去重
        """
        item = TaobaospiderItem()
        item['index'] = index
        item['url_object_id'] = url_object_id
        item['url'] = url
        item['title'] = title
        if image:
            # image_urls feeds Scrapy's ImagesPipeline (see IMAGES_STORE).
            item['image_urls'] = [image]
        item['push_date'] = get_datetime()
        item['market_price'] = market_price
        item['taobao_price'] = taobao_price
        yield item




