import scrapy
from bs4 import BeautifulSoup
from ..items import TpycarItem


class TpycarSpider(scrapy.Spider):
    """Scrape the pcauto.com.cn car popularity ranking pages.

    Fetches the ranking list pages at
    ``https://price.pcauto.com.cn/top/k0-p{n}.html`` and yields one
    :class:`TpycarItem` per listed car with its model name, price,
    brand and popularity rank.
    """

    name = "tpycar"
    allowed_domains = ["price.pcauto.com.cn"]
    # Number of ranking pages to fetch (k0-p1.html .. k0-p{page_count}.html).
    # Was a hard-coded range(1, 4); kept at 3 for identical default behavior.
    page_count = 3

    def start_requests(self):
        """Issue one request per ranking page (overrides start_urls-based flow)."""
        for index in range(1, self.page_count + 1):
            url = f'https://price.pcauto.com.cn/top/k0-p{index}.html'
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Parse one ranking page and yield a TpycarItem for each car row.

        Parameters
        ----------
        response : scrapy.http.Response
            The downloaded ranking page.

        Yields
        ------
        TpycarItem
            Item with ``type``, ``price``, ``brand`` and ``rank`` fields,
            which is passed on to the item pipeline.
        """
        soup = BeautifulSoup(response.text, 'lxml')
        # Each <li> under .listA is one car row in the ranking table.
        for li in soup.select('.listA>li'):
            car = TpycarItem()
            # Local named `model` to avoid shadowing the builtin `type`;
            # the item field stays 'type' so downstream pipelines still work.
            model = li.select_one('.sname').text.strip()
            car['type'] = model
            car['price'] = li.select_one('.price').text.strip()
            # Brand is the second '.col1' cell in the row, hence index 1.
            car['brand'] = li.select('.col1')[1].text.strip()
            car['rank'] = li.select_one('.rank').text.strip()
            yield car
        # Use the spider's logger rather than print(); message kept verbatim.
        self.logger.info('------------------------一页数据获取完毕------------------------------')
