import scrapy


class FavirateSpider(scrapy.Spider):
    """Crawl xiachufang.com listing pages and yield per-recipe metadata.

    Each yielded item is a plain dict with keys:
    title, link, img_link, tags, num, author.
    """

    name = "favirate"
    allowed_domains = ["xiachufang.com"]
    # start_urls is unused; start_requests() builds the page URLs explicitly.

    def start_requests(self):
        """Schedule requests for listing pages 1 through 20."""
        for page in range(1, 21):
            yield scrapy.Request(
                url=f"https://xiachufang.com/page{page}",
                callback=self.parse1,
            )

    def parse1(self, response):
        """Extract every recipe card from one listing page.

        Yields one dict per card. Any field missing from the markup is
        emitted as None (or [] for tags) rather than aborting the page.
        """
        for card in response.xpath('//div[contains(@class, "recipe-215-horizontal")]'):
            # BUG FIX: .get() returns None when the name link is absent or
            # empty; the original called .strip() unconditionally, raising
            # AttributeError and dropping all remaining cards on the page.
            raw_title = card.xpath('.//p[@class="name"]/a/text()').get()
            yield {
                "title": raw_title.strip() if raw_title else None,
                "link": card.xpath('.//p[@class="name"]/a/@href').get(),
                # Images are lazy-loaded, so the real URL lives in data-src.
                "img_link": card.xpath('.//img/@data-src').get(),
                "tags": card.xpath('.//p[contains(@class, "ing")]/a/text()').getall(),
                "num": card.xpath('.//p[contains(@class, "stats")]/span/text()').get(),
                "author": card.xpath('.//p[@class="author"]/a/text()').get(),
            }
