# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.utils.project import get_project_settings
from RSpider.items import TestLoader
from RSpider.spiders.RedisClient import RedisClient
from RSpider.spiders.Fetcher import Fetcher
import threading

class BaseSpider(CrawlSpider):
    """Distributed crawl spider.

    Bootstraps from the ``SEED_URLS`` project setting, then streams further
    URLs from a Redis-backed queue (node ``'url_queue'``) while a background
    thread periodically pushes URLs into that queue.  Link extraction is
    driven by the ``rules`` tuple; matched directory pages are scraped by
    :meth:`parse_directory`.
    """

    name = 'Base'
    allowed_domains = ['dmoz.org']

    # CrawlSpider link-extraction rules:
    #  - first rule: follow links inside the "catalogs" div (no callback,
    #    so follow defaults to True);
    #  - second rule: follow directory-column links AND hand each response
    #    to parse_directory.
    rules = (
        Rule(LinkExtractor(restrict_xpaths='//div[@id="catalogs"]')),
        Rule(LinkExtractor(restrict_xpaths='//ul[@class="directory dir-col"]'),
             callback='parse_directory', follow=True),
    )

    def __init__(self, *a, **kw):
        """Create the spider and its Redis / fetcher helpers."""
        super().__init__(*a, **kw)
        self.redis_client = RedisClient()
        self.fetcher = Fetcher()

    def start_requests(self):
        """Yield seed requests, then stream requests from the Redis queue.

        Seed URLs come from the ``SEED_URLS`` project setting (defaults to
        an empty list).  A background thread is started to feed the Redis
        queue, after which this generator blocks on :meth:`consume_urls`.
        """
        settings = get_project_settings()
        seed_urls = settings.get('SEED_URLS', [])
        for url in seed_urls:
            yield scrapy.Request(url=url, callback=self.parse)
        node_name = 'url_queue'
        # FIX: daemon=True — the original non-daemon thread kept the
        # process alive after the spider/reactor shut down, hanging on exit.
        threading.Thread(
            target=self.redis_client.send_urls_periodically,
            args=(node_name,),
            daemon=True,
        ).start()
        yield from self.consume_urls(node_name)

    def consume_urls(self, node_name):
        """Yield a Request for every URL consumed from Redis node *node_name*."""
        for url in self.redis_client.consume_urls(node_name):
            yield scrapy.Request(url=url, callback=self.parse)

    def parse_directory(self, response):
        """Scrape one directory page: yield one item per ``<li>`` entry."""
        for li in response.css('ul.directory-url > li'):
            tl = TestLoader(selector=li)
            tl.add_css('name', 'a::text')
            tl.add_css('description', '::text')
            tl.add_css('link', 'a::attr(href)')
            tl.add_value('url', response.url)
            yield tl.load_item()

    def spider_closed(self):
        # NOTE(review): this is not connected to the spider_closed signal —
        # Scrapy will not call it automatically unless it is wired up via
        # crawler.signals.connect(...) (e.g. in from_crawler). Confirm intent.
        pass