import scrapy


class HttpbinSpider(scrapy.Spider):
    """Spider that fetches https://httpbin.org/get 1000 times with a varying
    ``age`` query parameter and yields each JSON response body as an item.

    httpbin.org echoes the request back as JSON, so this is primarily useful
    for verifying concurrency/throughput and request settings.
    """

    name = "httpbin"

    async def start(self):
        """Generate the initial requests (Scrapy >= 2.13 async entry point).

        Iterates the range directly instead of pre-building a list of 1000
        URL strings — same requests, no throwaway intermediate list.
        """
        for i in range(1000):
            yield scrapy.Request(f"https://httpbin.org/get?age={i}")

    def parse(self, response):
        """Parse one response.

        Yields the decoded JSON body as an item so it flows through Scrapy's
        item pipelines / feed exports, rather than printing it (which would
        discard the data from the crawl's output).
        """
        yield response.json()
