import scrapy

from  geektime.items import geektimeScrapyDemoItem


class GeektimeitemspiderSpider(scrapy.Spider):
    """Spider for quotes.toscrape.com: yields one item per quote and walks pagination."""

    name = "geektimeItemSpider"
    allowed_domains = ["quotes.toscrape.com"]
    start_urls = ["https://quotes.toscrape.com"]

    def parse(self, response):
        """Extract every quote block on the page, then queue the pager links.

        Yields populated ``geektimeScrapyDemoItem`` instances followed by
        follow-up ``Request`` objects for each pagination anchor; Scrapy's
        duplicate filter prevents re-crawling already-seen pages.
        """
        for block in response.css('div.quote'):
            entry = geektimeScrapyDemoItem()
            entry['text'] = block.css('span.text::text').get()
            entry['author'] = block.css('small.author::text').get()
            entry['tags'] = block.css('div.tags a.tag::text').getall()
            yield entry

        # Schedule every pager link (Next/Previous) back through this callback.
        for request in response.follow_all(css='ul.pager a', callback=self.parse):
            yield request
