from scrapy.contrib_exp.crawlspider import CrawlSpider, Rule
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from scrapy.http import FormRequest
from scrapy.contrib.loader import XPathItemLoader
from scrapy.contrib.loader.processor import Compose
import re
from webScrapy.items import WebScrapyItem

def blankFilter(text):
	"""Return the first extracted value with all whitespace removed.

	Used as the loader's output processor via ``Compose``, so *text* is
	the (non-empty) list of strings produced by an XPath ``.extract()``.

	Args:
		text: list of unicode strings; only the first element is used.

	Returns:
		The first element with non-breaking spaces (u'\\xa0') and every
		other whitespace character stripped out.
	"""
	value = text[0].replace(u'\xa0', '')
	# Raw string: '\s' is an invalid escape sequence on Python 3
	# (SyntaxWarning) — r'\s' is the correct regex spelling.
	return re.sub(r'\s', '', value)


class WebScrapyItemLoader(XPathItemLoader):
	# Every loaded field is passed through blankFilter, which takes the
	# first extracted value and strips all whitespace (incl. u'\xa0').
	default_output_processor = Compose(blankFilter)

class WebScrapySpider(CrawlSpider):
	"""Crawl Taobao search results for shoes, follow each item page to the
	seller's rating page, and scrape the shop name and address into a
	WebScrapyItem.

	Callback chain: rule -> level2Func -> itemFunc -> adressFunc.
	"""
	name = 'webScrapy'
	allowed_domains = ['taobao.com']
	start_urls = [
		'http://s.taobao.com/search?q=%D0%AC&commend=all&ssid=s5-e&search_type=item&atype=&filterFineness=',]
	rules = (
		# Item detail pages matched from the search results.
		Rule(r'http://item.taobao.com/item.htm\?id=\d+$', 'level2Func'),
	)

	def level2Func(self, response):
		"""Item page: follow the seller's "user-rate" link, if present."""
		hxs = HtmlXPathSelector(response)
		links = hxs.select('//div[contains(@id, "content")]//div[contains(@class, "nav")]/ul//a[contains(@href, "user-rate")]/@href').extract()
		if not links:
			return
		# Plain navigation (no form data), so Request is sufficient.
		return Request(links[0], callback=self.itemFunc)

	def itemFunc(self, response):
		"""Rating page: follow the link taken from the second <dd> entry."""
		hxs = HtmlXPathSelector(response)
		targets = hxs.select('//div[@id="shop-rate-box"]//div[contains(@class,"skin-gray")]//div[contains(@class,"bd")]//dd[2]/text()').extract()
		if not targets:
			return
		return Request(targets[0], callback=self.adressFunc)

	def adressFunc(self, response):
		"""Final page: load the shop's name and address into an item.

		BUG FIX: this method was previously also named ``itemFunc``,
		shadowing the method above and leaving ``self.adressFunc`` (the
		callback used there) undefined.
		"""
		hxs = HtmlXPathSelector(response)
		item = WebScrapyItem()
		item['url'] = response.url
		l = WebScrapyItemLoader(item=item, selector=hxs)
		# NOTE: 'adress' (sic) is the field name declared on WebScrapyItem;
		# it must stay misspelled to match the item definition.
		l.add_xpath('name', '//div[@id="shop-rate-box"]//div[contains(@class,"skin-gray")]//div[contains(@class,"bd")]//dd[1]/a/text()')
		l.add_xpath('adress', '//div[@id="shop-rate-box"]//div[contains(@class,"skin-gray")]//div[contains(@class,"bd")]//dd[2]/text()')
		return l.load_item()




