import scrapy


class WhatBuySpider(scrapy.Spider):
    """Spider for smzdm.com mobile deals pages (m.smzdm.com/youhui/).

    Scrapes deal cards (title, price, mall, date) from each listing page
    and follows pagination up to ``max_page`` pages.
    """

    name = 'what_buy'
    allowed_domains = ['m.smzdm.com', 'smzdm.com']
    start_urls = ['https://m.smzdm.com/youhui/']
    page_num = 1   # current page counter (class-level; one spider instance per crawl)
    max_page = 10  # stop following pagination after this many pages

    def parse(self, response):
        """Extract one item per deal card, then schedule the next page.

        Yields:
            dict: one record per <li> card with keys
                  "title", "prices", "malls", "datas".
            scrapy.Request: request for the next listing page (up to max_page).
        """
        # Each deal is one <li> under the page's content wrapper.
        li_list = response.xpath('//div[@class="wrap-content"]/ul/li')

        for li_temp in li_list:
            # Hoist the shared card-content container so each field
            # selector stays short; extracted values are unchanged.
            card = li_temp.xpath('.//div[@class="card-content"]')
            titles = card.xpath('./div[@class="card-title"]/text()').extract_first()
            prices = card.xpath('./div[@class="card-price"]/text()').extract_first()
            malls = card.xpath(
                './div[@class="card-actions"]/div/span/span[@class="card-mall"]/text()').extract_first()
            datas = card.xpath(
                './div[@class="card-actions"]/div/span/span[2]/text()').extract_first()

            yield {
                "title": titles,
                "prices": prices,
                "malls": malls,
                "datas": datas
            }

        # Build the next page's URL and follow it.
        self.page_num += 1
        if self.page_num <= self.max_page:
            # BUG FIX: the original wrote
            #   "...page=%s" % self.page_num + 1
            # which %-formats first and then tries str + int, raising
            # TypeError at runtime (and would have double-incremented the
            # page anyway, since page_num was just bumped above).
            next_page_url = "https://m.smzdm.com/youhui/page=%s" % self.page_num
            yield scrapy.Request(url=next_page_url, callback=self.parse)
