import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from baoma.items import BaomaItem


class Baoma5Spider(CrawlSpider):
    """Crawl the picture pages for series 65 on car.autohome.com.cn and
    yield one :class:`BaomaItem` per page with full-resolution image URLs.
    """

    name = 'baoma5'
    allowed_domains = ['car.autohome.com.cn']
    start_urls = ['https://car.autohome.com.cn/pic/series/65.html']

    # Follow every picture page under series 65 and route each response to
    # parse_page.  Dots are escaped so '.' matches only a literal dot
    # (the original unescaped pattern could match unintended URLs).
    rules = (
        Rule(LinkExtractor(allow=r"https://car\.autohome\.com\.cn/pic/series/65.+"),
             callback="parse_page", follow=True),
    )

    def parse_page(self, response):
        """Extract the category title and image URLs from one picture page.

        Yields a single BaomaItem; ``image_urls`` is a concrete list (not a
        lazy ``map`` iterator, which is single-use and would be exhausted if
        the item were logged/serialized before the images pipeline ran).
        """
        # First text node of the page's category header; may be None if the
        # page layout differs — downstream should tolerate a missing title.
        category = response.xpath('//div[@class="uibox"]/div/text()').get()
        self.logger.debug('category: %s', category)

        thumb_srcs = response.xpath(
            '//div[contains(@class, "uibox-con")]/ul/li//img/@src').getall()
        # Thumbnails embed a resize marker (240x180_0_q95_c42_) in the path;
        # stripping it yields the URL of the original-size image.
        image_urls = [
            response.urljoin(src.replace('240x180_0_q95_c42_', ''))
            for src in thumb_srcs
        ]
        yield BaomaItem(title=category, image_urls=image_urls)
