# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from alldangdang.items import AlldangdangItem

class DanglistSpider(CrawlSpider):
    """Crawl dangdang.com category listings and scrape product data.

    Starts at the category index and follows category pages
    (``cid<digits>.html``, which includes the paginated
    ``pg<n>-cid<digits>.html`` form) and ``cp<digits>...html`` pages,
    yielding one item per listing page via :meth:`parse_item`.
    """

    name = 'danglist'
    allowed_domains = ['dangdang.com']
    start_urls = ['http://category.dangdang.com/?ref=www-0-C']

    # NOTE: LinkExtractor ``allow=`` patterns are unanchored regex searches,
    # so r'cid\d+\.html' already matches paginated URLs such as
    # 'pg3-cid4006497.html', and r'cp\d+.*?\.html' covers its pg- variant.
    # The narrower pg-specific rules that previously duplicated these were
    # dead (CrawlSpider de-duplicates links per response) and were removed.
    rules = (
        Rule(LinkExtractor(allow=r'cid\d+\.html'), callback='parse_item', follow=True),
        Rule(LinkExtractor(allow=r'cp\d+.*?\.html'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Yield one AlldangdangItem holding parallel lists of product fields.

        Each field is a list with one entry per product on the listing page;
        downstream consumers are expected to align them positionally.
        """
        item = AlldangdangItem()
        item["name"] = response.xpath("//p[@class='name']/a/@title").extract()
        # 'span[1]' is a subset of 'span' and XPath unions de-duplicate, so
        # the original "|//p[@class='price']/span[1]/text()" branch was
        # redundant and has been dropped (same node-set either way).
        item["price"] = response.xpath("//p[@class='price']/span/text()").extract()
        item["link"] = response.xpath("//p[@class='name']/a/@href").extract()
        item["shopname"] = response.xpath("//p[@class='link']/a/text()|//li/p[4]/text()").extract()
        item["comnum"] = response.xpath("//p[@class='star']/a/text()|//li/p[5]/a/text()").extract()
        yield item