from typing import AsyncIterator, Any

import scrapy

class QuotesYcombinatorSpider(scrapy.Spider):
    """Crawl Hacker News listing pages, following the "More" pagination link.

    Yields one item dict per story containing its title text and outbound URL.
    """

    name = "quotes_ycombinator"

    async def start(self) -> AsyncIterator[Any]:
        """Seed the crawl with the front page (Scrapy >= 2.13 async entry point).

        Yields:
            scrapy.Request: the initial request for the HN front page.
        """
        yield scrapy.Request('https://news.ycombinator.com/')

    def parse(self, response):
        """Extract story links from a listing page and follow pagination.

        Args:
            response: the downloaded listing page (scrapy HtmlResponse).

        Yields:
            dict: ``{"title": ..., "url": ...}`` for each story link; either
                value may be ``None`` if the markup lacks text/href.
            scrapy.Request: a follow-up request for the next page, when the
                "More" link (``a[@rel="next"]``) is present.
        """
        # Each story row looks like:
        #   <td class="title"><span class="titleline"><a href="...">Title</a>
        story_links = response.xpath(
            '//td[@class="title"]/span[@class="titleline"]/a'
        )
        for link in story_links:
            # Yield items so Scrapy's pipelines and feed exports receive
            # them; print() would discard the data from the item flow.
            yield {
                "title": link.xpath('./text()').get(),
                "url": link.xpath('./@href').get(),
            }

        # "More" link at the bottom of the listing drives pagination.
        next_url = response.xpath('//td[@class="title"]/a[@rel="next"]/@href').get()
        if next_url:
            yield response.follow(next_url)
        else:
            # Last page reached: log via the spider's logger rather than print.
            self.logger.info("No next page found; crawl complete.")
