# -*- coding: utf-8 -*-
import scrapy,redis,time,random
from urllib.parse import quote
from scrapy import Request
from mastergoodread.items import MastergoodreadItem

class GoodreadSpider(scrapy.Spider):
    """Crawl Goodreads listing pages seeded from a Redis queue.

    Start URLs are drained from the Redis list ``goodreads:start_urls``
    (host/port come from the project settings ``REDIS_HOST`` / ``REDIS_PORT``).
    Each page yields one item per book link found, then follows pagination.
    """

    name = 'goodread'
    allowed_domains = ['goodreads.com']
    base_urls = ['https://www.goodreads.com']

    def start_requests(self):
        """Pop every URL from the Redis queue and schedule a request for it.

        Yields:
            scrapy.Request for each queued start URL (dont_filter=True so
            re-queued URLs are always re-crawled).
        """
        r = redis.Redis(
            host=self.settings.get("REDIS_HOST"),
            port=self.settings.get("REDIS_PORT"),
            decode_responses=True,  # lpop returns str, not bytes
        )
        # Drain with lpop directly: it returns None once the list is empty,
        # avoiding the llen-then-lpop race and one round-trip per URL.
        while True:
            url = r.lpop('goodreads:start_urls')
            if url is None:
                break
            self.logger.info('Start request: %s', url)
            yield Request(url=url, callback=self.parse, dont_filter=True)

    def parse(self, response):
        """Extract book links from a listing page and follow pagination.

        Args:
            response: the listing-page response.

        Yields:
            MastergoodreadItem with the (relative) book URL in ``item['url']``,
            then a follow-up Request for the next page, if any.
        """
        self.logger.debug('Parsing %s', response.url)
        for href in response.css('a.bookTitle::attr(href)').extract():
            item = MastergoodreadItem()
            # NOTE(review): href is stored as extracted (relative path),
            # matching the original behavior — downstream presumably joins it.
            item['url'] = href
            yield item
        next_url = response.css("a.next_page::attr(href)").extract_first()
        if next_url:
            yield scrapy.Request(url=response.urljoin(next_url), callback=self.parse)
