# -*- coding: utf-8 -*-
import re
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from finallpro.items import FinallproItem,buyItem,amazonItem
from test import gettext
class Buy360Spider(CrawlSpider):
    """Crawl www.360buy.com and scrape product pages into buyItem records."""
    name = 'buy360'
    allowed_domains = ['360buy.com']
    start_urls = ['http://www.360buy.com/']

    # Dots are escaped so '.html' matches literally instead of any character.
    rules = (
        Rule(SgmlLinkExtractor(allow=(r'product/\d+\.html',)), callback='get', follow=True),
        Rule(SgmlLinkExtractor(allow=(r'products/\d+-\d+-\d+\.html',)), follow=True),
        Rule(SgmlLinkExtractor(allow=(r'products/\d+-\d+-\d+-0-0-0-0-0-0-0-1-1-\d+\.html',))),
        )

    def get(self, response):
        """Parse one product page into a buyItem.

        Fields produced:
          title -- product name text (placeholder string when not found)
          img   -- main product image URL (placeholder string when not found)
          id    -- numeric product id parsed from the page URL, 0 when absent
          price -- product price: pages whose id is 6 or 10 digits long render
                   the price as an image, which is OCR'd via gettext(); other
                   pages expose it inside a 'price-b' script resource.
                   0 when the price cannot be determined.
        """
        hxs = HtmlXPathSelector(response)
        item = buyItem()

        title = hxs.select("//div[@id='name']/h1/text()")
        # Placeholder sentinels preserved from the original item schema.
        item['title'] = title.extract()[0] if title else 'ttttttttttttt'

        img = hxs.select("//div[@id='spec-n1' and @class='jqzoom']/img/@src")
        item['img'] = img.extract()[0] if img else 'ttttttttttttttttttttttttttttttttttt'

        ids = re.findall(r'(\d+)\.html', response.url)
        if not ids:
            # URL carries no product id; record sentinels and stop.
            item['id'] = 0
            item['price'] = 0
            return item

        item['id'] = ids[0]
        if len(ids[0]) in (6, 10):
            # Six-digit ids render the price as an image; ten-digit ids
            # (furniture, and apparently e-books too) do the same.
            price_img = hxs.select(
                "//ul[@id='summary']/li[2]/div[1][@class='fl']"
                "/strong[@class='price']/img/@src")
            if price_img:
                digits = re.findall(r'(\d+)', gettext(price_img.extract()[0]))
                # Store a scalar (first run of digits), consistent with the
                # other branches; 0 when OCR yields nothing.
                item['price'] = digits[0] if digits else 0
            else:
                item['price'] = 0
        else:
            # Other ids publish the price in a 'price-b' script file as
            # a JSON-ish fragment: "P":"<price>",
            import urllib2  # local import: only this branch needs it (was a NameError before)
            price_re = re.compile(r'"P":"(.*)",')
            item['price'] = 0
            for src in hxs.select("//script/@src").extract():
                if 'price-b' in src:
                    fh = urllib2.urlopen(src)
                    try:
                        matches = price_re.findall(fh.read())
                    finally:
                        fh.close()  # avoid leaking the HTTP handle
                    if matches:
                        item['price'] = matches[0]
                    break
        return item



