import scrapy
from bmw.items import BmwItem
from scrapy.spiders import CrawlSpider,Rule
from scrapy.linkextractors import LinkExtractor

class Bmw5Spider(CrawlSpider):
    """Crawl the Autohome picture gallery for the BMW 5 series (series id 65).

    Follows every link under the series-65 gallery and yields one ``BmwItem``
    per page, carrying the gallery title and full-resolution image URLs.
    """

    name = 'bmw5'
    allowed_domains = ['car.autohome.com.cn']
    start_urls = ['https://car.autohome.com.cn/pic/series/65.html']

    # Follow all pagination/detail links under the series-65 gallery and hand
    # every matched page to parse_page.
    rules = (
        Rule(LinkExtractor(allow=r"https://car.autohome.com.cn/pic/series/65.+"),
             callback="parse_page", follow=True),
    )

    def parse_page(self, response):
        """Extract the gallery title and full-size image URLs from one page.

        Yields a single BmwItem with ``title`` and ``image_urls``.
        """
        print('=' * 50)
        category = response.xpath('//div[@class="uibox"]/div/text()').get()
        image_urls = response.xpath('//div[contains(@class, "uibox-con")]/ul/li//img/@src').getall()
        # Strip the thumbnail-size prefix so the original image is downloaded,
        # and resolve relative/protocol-relative srcs against the page URL.
        # BUG FIX: materialize as a list — a lazy `map` object is exhausted
        # after one iteration and is not serializable, which breaks Scrapy's
        # ImagesPipeline and feed exports.
        srcs = [
            response.urljoin(src.replace('240x180_0_q95_c42_', ''))
            for src in image_urls
        ]
        yield BmwItem(title=category, image_urls=srcs)

    def parse_test(self, response):
        """Alternative parser (not wired to any Rule): yield one item per
        "uibox" section of a page, keeping the thumbnail-size URLs.
        """
        print('=' * 50)
        # Skip the first uibox — it is not an image section on this layout.
        uiboxs = response.xpath('//div[@class = "uibox"]')[1:]
        for uibox in uiboxs:
            category = uibox.xpath('.//div[@class = "uibox-title"]/a/text()').get()
            urls = uibox.xpath('.//ul/li/a/img/@src').getall()
            # Resolve each (possibly protocol-relative) src to an absolute URL.
            urls = [response.urljoin(url) for url in urls]
            yield BmwItem(title=category, image_urls=urls)

