from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from finallpro.items import FinallproItem

class TmallSpider(CrawlSpider):
    """Crawl spider for tmall.com that follows product/search links and
    scrapes the listed price from product pages.

    Rules:
        * URLs containing an ``id=<digits>`` query parameter are treated as
          product pages: parsed by :meth:`tmall` and followed for more links.
        * ``item.htm`` and ``search_product.htm`` URLs are followed with the
          default (no-op) callback purely for link discovery.
    """
    name = 'tmall'
    allowed_domains = ['tmall.com']
    start_urls = ['http://www.tmall.com/']

    rules = (
        # Product detail pages carry an id=<digits> parameter; scrape + follow.
        Rule(SgmlLinkExtractor(allow=(r'id=\d+',)), callback='tmall', follow=True),
        # Listing/search pages: follow only, to discover product links.
        Rule(SgmlLinkExtractor(allow=(r'item.htm',))),
        Rule(SgmlLinkExtractor(allow=(r'search_product.htm',))),
    )

    def parse_item(self, response):
        """Placeholder callback (not referenced by any rule).

        Kept for interface stability; returns an empty item.
        """
        hxs = HtmlXPathSelector(response)
        i = FinallproItem()
        #i['domain_id'] = hxs.select('//input[@id="sid"]/@value').extract()
        #i['name'] = hxs.select('//div[@id="name"]').extract()
        #i['description'] = hxs.select('//div[@id="description"]').extract()
        return i

    def tmall(self, response):
        """Extract the product price from a tmall product page.

        Fixes vs. original: builds a ``FinallproItem`` (the original called
        the undefined name ``tmallItem``) and returns the item so Scrapy's
        pipeline actually receives it (the original dropped it).
        """
        hxs = HtmlXPathSelector(response)
        i = FinallproItem()
        tmp = hxs.select("//strong[@id='J_StrPrice']/text()")
        if tmp:
            i['price'] = tmp.extract()[0]
        else:
            # No price node found on the page; record a zero sentinel.
            i['price'] = 0
        return i

