# -*- coding: utf-8 -*-
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from bmw.items import BmwItem


class Bmw5Spider(CrawlSpider):
    """Crawl BMW 5-series image galleries on car.autohome.com.cn.

    Follows every picture page of series 65 and yields one ``BmwItem`` per
    page, with thumbnail image URLs upgraded to their full-resolution form.
    """
    name = 'bmw5'
    allowed_domains = ['car.autohome.com.cn']
    start_urls = ['https://car.autohome.com.cn/pic/series/65.html#pvareaid=3454438']

    rules = (
        # Follow all series-65 picture pages; parse each with parse_page.
        Rule(LinkExtractor(allow=r'https://car.autohome.com.cn/pic/series/65.+'), callback='parse_page', follow=True),
    )

    def parse_page(self, response):
        """Extract series title, gallery category and full-size image URLs.

        Thumbnail -> full-size URL rules observed on the site:
          "modified" (改装) galleries:
            .../240x180_0_q95_c42_autohomecar__<id>.jpg
            .../1024x0_1_q95_autohomecar__<id>.jpg
          all other galleries:
            .../240x180_0_q95_c42_autohomecar__<id>.jpg
            .../autohomecar__<id>.jpg

        :param response: the gallery page response
        :yields: one BmwItem(cartab_title, category_title, image_urls)
        """
        # NOTE(review): absolute XPath is brittle — any layout change on the
        # site breaks it; a class-based selector would be safer. TODO confirm.
        cartab_title = response.xpath('/html/body/div[2]/div/div[2]/div[7]/div/div[1]/h2/a/text()').get()
        # Category of the gallery (e.g. body, interior, exterior, modified).
        category_title = response.xpath('//div[@class="uibox"]/div/text()').get()
        if category_title is not None:
            # The text node often carries newlines/indentation; strip it so
            # the '改装' comparison below actually matches.
            category_title = category_title.strip()
        srcs = response.xpath('//div[contains(@class,"uibox-con")]/ul/li//img/@src').extract()
        if category_title == '改装':
            # Modified-car galleries use a different full-size prefix.
            srcs = [src.replace('240x180_0_q95_c42', '1024x0_1_q95') for src in srcs]
        else:
            # For all other galleries, dropping the thumbnail prefix yields
            # the original-resolution URL.
            srcs = [src.replace('240x180_0_q95_c42_', '') for src in srcs]
        # The scraped src values are protocol-relative; make them absolute.
        srcs = [response.urljoin(src) for src in srcs]
        yield BmwItem(cartab_title=cartab_title, category_title=category_title, image_urls=srcs)
