import re
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from finallpro.items import FinallproItem,dangdangItem

class DangdangSpider(CrawlSpider):
    """Crawl dangdang.com and scrape product pages.

    Follows every link matching ``/product.aspx`` and extracts the
    price, title, main image URL and numeric product id into a
    ``dangdangItem``.  Missing fields fall back to the original
    sentinel strings so downstream consumers are unaffected.
    """
    name = 'dangdang'
    allowed_domains = ['dangdang.com']
    start_urls = ['http://www.dangdang.com/']

    rules = (
        Rule(SgmlLinkExtractor(allow=(r'/product.aspx')), callback='parse_item', follow=True),
    )

    # Candidate XPaths for the price, tried in order: promotional price
    # first, then the sale-price tag, then the regular price block.
    PRICE_XPATHS = (
        "//div[@class='promotions']/span[@class='promotions_price_d']/b/text()",
        "//span[@id='salePriceTag']/text()",
        "//p[@class='price_d']/span[@class='num']/text()",
    )
    # Title XPaths: standard product layout first, book-page layout second.
    TITLE_XPATHS = (
        "//div[@class='dp_main']/div[@class='h1_title']/h1/text()",
        "//div[@class='dp_main']/div[@class='h1_title book_head']/h1/text()",
    )

    @staticmethod
    def _first_text(hxs, xpaths, default):
        """Return the first extracted value among *xpaths*, or *default*.

        :param hxs: selector wrapping the response
        :param xpaths: iterable of XPath expressions tried in order
        :param default: value returned when no expression matches
        """
        for xpath in xpaths:
            selected = hxs.select(xpath)
            if selected:
                return selected.extract()[0]
        return default

    def parse_item(self, response):
        """Parse one product page into a ``dangdangItem``.

        :param response: the downloaded product-page response
        :returns: populated ``dangdangItem``
        """
        hxs = HtmlXPathSelector(response)
        item = dangdangItem()

        item['price'] = self._first_text(hxs, self.PRICE_XPATHS, '000')
        item['title'] = self._first_text(hxs, self.TITLE_XPATHS, 'error_message')
        item['img'] = self._first_text(
            hxs, ("//img[@id='largePic']/@src",), 'rrrrrrrrrrrrrrrrrr')

        # The numeric product id is embedded in the URL query string;
        # keep the original 18-zero sentinel when it is absent.
        matches = re.findall(r'product_id=(\d+)', response.url)
        item['id'] = matches[0] if matches else '000000000000000000'
        return item
