import scrapy

# Item (entity) class
class CarKoubeiItem(scrapy.Item):
    """Container for one car-review record scraped from koubei.16888.com."""

    name = scrapy.Field()   # car series name
    grade = scrapy.Field()  # review score
    date = scrapy.Field()   # date the score was posted

class KoubeiSpiderSpider(scrapy.Spider):
    """Spider that crawls car review scores from koubei.16888.com.

    The first request hits the listing page to discover how many
    paginated pages exist (named new_1.html .. new_N.html), then
    schedules one request per page and extracts a (name, grade, date)
    record for every review entry found.
    """

    # Spider name used by `scrapy crawl`.
    name = 'koubei_spider'
    # Site root; page URLs are built as base_url + 'new_<i>.html'.
    base_url = 'https://koubei.16888.com/'
    start_urls = ['https://koubei.16888.com/new.html']

    def parse(self, response):
        """Read the pagination size and schedule one request per page.

        Yields scrapy.Request objects handled by parse_page.
        """
        # The pager's second-to-last anchor holds the max page number;
        # pages follow the pattern new_1.html, new_2.html, ...
        # NOTE: absolute /html/body/... path is fragile; guard against a
        # layout change instead of crashing with IndexError.
        max_page = response.xpath(
            '/html/body/div[7]/div[1]/div[2]/div/a[last()-1]/text()'
        ).get()
        if max_page is None:
            self.logger.warning('Could not locate the max page number')
            return
        max_page = max_page.strip()

        print(f'最大页面数：{max_page}')

        # Fan out one request per listing page.
        for i in range(1, int(max_page) + 1):
            page_url = f'{self.base_url}new_{i}.html'
            print(page_url)
            yield scrapy.Request(url=page_url, callback=self.parse_page)

    # Per-page extraction
    def parse_page(self, response):
        """Extract (name, grade, date) from every review block on one page.

        Yields one populated CarKoubeiItem per <dl> review entry; any
        field whose node is missing falls back to an empty string.
        """
        dl_arr = response.xpath(
            '//div[@class="wrap"]/div[@class="main_left f_l"]'
            '/div[@class="mouth_box"]/dl'
        )
        for dl in dl_arr:
            # Car series name: first whitespace-separated token of the
            # anchor text.
            text1 = dl.xpath('dd/p[@class="add"]/a/text()').get()
            name = text1.strip().split(' ')[0] if text1 else ''

            # Score: the text node reads like "4.50分 ..."; keep the part
            # before the "分" (points) marker.
            text2 = dl.xpath('dd/p[@class="add"]/text()').get()
            grade = text2.split('分')[0].strip() if text2 else ''

            # Date the score was posted.
            text3 = dl.xpath(
                'dd/p[@class="add"]/span[@class="m_l_10"]/text()'
            ).get()
            date = text3.strip() if text3 else ''
            print(f'{name},{grade},{date}')

            # Map the extracted values onto the item.
            item = CarKoubeiItem()
            item['name'] = name
            item['grade'] = grade
            item['date'] = date
            print(f'item===>{item}')
            yield item
