import scrapy


class JsSpider(scrapy.Spider):
    """Fetch JavaScript-rendered quote pages and dump each raw HTML body to disk.

    Writes one ``page_<n>.html`` file per crawled page into the current
    working directory.
    """

    name = 'js'

    def start_requests(self):
        """Yield requests for pages 1-3 of the JS-rendered quotes site.

        NOTE(review): ``meta={'js': True}`` presumably flags a downloader
        middleware to render JavaScript — confirm against the project's
        middleware configuration.
        """
        for page in range(1, 4):
            yield scrapy.Request(
                f'http://quotes.toscrape.com/js/page/{page}/',
                meta={'js': True},
            )

    def parse(self, response):
        """Save the response body as ``page_<n>.html``.

        The page number is taken from the second-to-last URL path segment
        (URLs end with a trailing slash, so ``[-1]`` would be empty).
        """
        self.logger.info('processing page %s', response.url)
        page_num = response.url.split('/')[-2]
        # Context manager guarantees the handle is closed (original leaked it),
        # and an explicit encoding avoids platform-dependent defaults.
        with open(f'page_{page_num}.html', 'w', encoding='utf-8') as f:
            f.write(response.text)
