# -*- coding: utf-8 -*-
import os, logging, time, json
logger = logging.getLogger(__name__)

from scrapy import signals, Selector
from selenium import webdriver
from scrapy.http import Request
from urllib import parse
from scrapy_redis.spiders import RedisSpider
from Spider.util.tool import get_md5_str,get_datetime
from Spider.items import TaobaoItem


class TaobaoSpider(RedisSpider):
    """Crawl furniture listings from taobao.com using a Selenium-driven Chrome.

    As a RedisSpider, this spider only crawls URLs pushed to its redis
    start-url list (``redis_key`` below).

    Usage (Windows dev box):
    workon spider && D: && cd D:\workspace\oschina\Spider\Spider\spiders && scrapy runspider taobao.py

    c: && cd C:\Program Files\Redis && redis-cli.exe
    auth li1234redis
    lpush taobao:start_urls https://www.taobao.com/
    """
    name = 'taobao'
    allowed_domains = ['www.taobao.com', 's.taobao.com', 'item.taobao.com', 'g-search1.alicdn.com','click.simba.taobao.com']
    redis_key = 'taobao:start_urls'
    custom_settings = {
        "LOG_FILE": os.path.normpath(
            os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "logs/taobao.log")),
        "IMAGES_STORE": os.path.normpath(
            os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "media/images/taobao/")),
    }


    def __init__(self, **kwargs):
        """Create the Chrome instance shared by this spider's requests.

        :param kwargs: spider options forwarded to ``RedisSpider.__init__``.
        """
        # Domains whose pages are rendered through the Selenium Chrome
        # (presumably consumed by a downloader middleware — verify against it).
        self.chromeUrl = ["www.taobao.com", "s.taobao.com", "item.taobao.com", "click.simba.taobao.com"]
        self.driver_path = os.path.normpath(os.path.join(os.path.dirname(os.path.dirname(__file__)),
            "util/seleniumdriver/chromedriver.exe"))
        self.chrome_options = webdriver.ChromeOptions()
        # Disable image loading to speed up page rendering.
        self.chrome_options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})
        # NOTE(review): executable_path/chrome_options are the Selenium 3 API;
        # Selenium 4 renamed them to service=/options= — confirm installed version.
        self.chrome = webdriver.Chrome(executable_path=self.driver_path, chrome_options=self.chrome_options)
        # Implicit wait of 5s applies to the whole browser session; set once.
        # (Original comment said 3s while the code sets 5s — code wins.)
        self.chrome.implicitly_wait(5)
        self.chrome.maximize_window()  # maximize the browser window
        # BUGFIX: forward **kwargs so spider arguments are not silently dropped.
        super(TaobaoSpider, self).__init__(**kwargs)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Hook the spider_closed signal so Chrome is shut down with the spider."""
        spider = super(TaobaoSpider, cls).from_crawler(crawler, *args, **kwargs)
        # Close the Chrome browser when this spider closes.
        crawler.signals.connect(spider.spider_closed, signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        """Quit the Chrome browser when the spider shuts down.

        :param spider: the spider instance being closed.
        """
        # BUGFIX: was logger.info(logger.info(...)), which logged the message
        # once and then logged the inner call's None return value as a second record.
        logger.info('TaobaoSpider-%s:爬虫关闭,关闭浏览器', spider.name)
        self.chrome.quit()  # shut down the Chrome browser

    def parse(self, response):
        """Dispatch follow-up requests based on which Taobao host responded.

        The response body is expected to be a JSON document (produced upstream,
        presumably by the Selenium middleware — verify against it).

        :param response: scrapy Response whose text is JSON.
        :return: yields scrapy Request objects.
        """
        text = response.text
        result = json.loads(text)
        # Entry page: follow the furniture-menu link.
        if "www.taobao.com" in response.url:
            munu_jiaju_href = result.get("munu_jiaju_href", None)
            if munu_jiaju_href:
                yield Request(url=parse.urljoin(response.url, munu_jiaju_href), callback=self.parse)
        # Rate-limited and redirected to login: wait, then retry the browser's current URL.
        elif "login.taobao.com" in response.url:
            time.sleep(60)
            yield Request(url=parse.urljoin(response.url, self.chrome.current_url), callback=self.parse)
        # Search listing: request every item link on this page plus the next page.
        elif "s.taobao.com" in response.url:
            next_pageButtonHref = result.get("next_pageButtonHref", None)  # next-page pagination link
            current_pageNum = result.get("current_pageNum", None)  # current page number
            # All furniture item links on the current page.
            for index, jiaju_href in enumerate(result.get("jiaju_hrefs", [])):
                yield Request(url=parse.urljoin(response.url, jiaju_href), callback=self.loadItem,
                    meta={"current_pageNum": current_pageNum, "sequence": index+1})
            if next_pageButtonHref:
                yield Request(url=parse.urljoin(response.url, next_pageButtonHref), callback=self.parse)
        else:
            logger.info('TaobaoSpider-parse该链接【{0}】不在有效范围之内'.format(response.url))

    def loadItem(self, response):
        """Build a TaobaoItem from an item-detail page's JSON payload.

        :param response: scrapy Response whose text is JSON.
        :return: yields one populated TaobaoItem.

        url_object_id = get_md5_str(url)
        """
        text = response.text
        result = json.loads(text)
        # Populate the item from the parsed JSON fields.
        item = TaobaoItem()
        item['push_date'] = get_datetime()  # time the record enters the pipeline
        item['url'] = result.get("url", None)
        item['current_pageNum'] = result.get("current_pageNum", None)
        item['sequence'] = result.get("sequence", None)
        item['title'] = result.get("title", None)
        item['specie'] = result.get("specie", None)
        item['price'] = result.get("price", None)
        item['image_urls'] = result.get("image_urls", None)
        item['image_files'] = []
        item['evaluation_cumulative'] = result.get("evaluation_cumulative", None)
        item['evaluation_additional'] = result.get("evaluation_additional", None)
        item['evaluation_good'] = result.get("evaluation_good", None)
        item['evaluation_medium'] = result.get("evaluation_medium", None)
        item['evaluation_poor'] = result.get("evaluation_poor", None)
        yield item