# -*- coding:UTF-8 -*-
import scrapy
from  douban.items import DoubanItem
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

class DoubanSpider(CrawlSpider):
    """Crawl douban book pages via tag links and pagination.

    Yields ``DoubanItem`` instances with ``link`` (absolute URL) and,
    for book entries, ``name`` (the book title).
    """
    name = "douban"

    start_urls = ['https://book.douban.com']

    # Two crawl rules:
    #   1. Follow hot-tag links on the start page.
    #   2. Follow pagination links inside a tag listing page.
    # Dots in the allow patterns are escaped so the regex matches the
    # domain literally (an unescaped '.' would match any character,
    # e.g. 'bookXdouban.com' would also pass).
    rules = (
        Rule(
            LinkExtractor(
                allow=r'https://book\.douban\.com',
                restrict_xpaths='//*[@class="hot-tags-col5 s"]/li/ul/li/a',
            ),
            callback='parse_item',
            follow=True,
        ),
        Rule(
            LinkExtractor(
                allow=r'https://book\.douban\.com/tag',
                restrict_xpaths='//*[@id="subject_list"]/div[2]/span/a',
            ),
            callback='parse_item',
            follow=True,
        ),
    )

    def parse_start_url(self, response):
        """Yield a DoubanItem for every hyperlink on a start page.

        CrawlSpider invokes this for responses from ``start_urls``.
        Each href is resolved to an absolute URL via ``urljoin``.
        """
        for href in response.css('a').xpath('@href').extract():
            item = DoubanItem()
            item['link'] = response.urljoin(href)
            yield item

    def parse_item(self, response):
        """Yield one DoubanItem per book entry on a tag listing page.

        Reads each ``<li>`` of the ``#subject_list`` list, taking the
        book URL from the anchor's @href and the title from its @title.
        Entries missing either attribute are skipped.  The link is
        stored as a single absolute URL string (consistent with
        ``parse_start_url``), not as the raw extracted list.
        """
        for li in response.xpath('//*[@id="subject_list"]/ul/li'):
            anchor = li.xpath('div[@class="info"]/h2/a')
            href = anchor.xpath('@href').extract_first()
            title = anchor.xpath('@title').extract_first()
            if not href or not title:
                # Defensive: skip malformed entries instead of yielding
                # items with None fields.
                continue
            item = DoubanItem()
            item['link'] = response.urljoin(href)
            item['name'] = title
            yield item

        
        
