# -*- coding: utf-8 -*-
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector

class diannao(BaseSpider):
    name = 'coo8'
    allowed_domains = ["coo8.com"]
    start_urls = []
    for id in range(1,16):
        start_urls.append('http://www.coo8.com/products/600-0-0-0-'+str(id)+'-0-103107110.html')
    #start_urls.append('http://www.coo8.com/products/600-0-0-0-'1'-0-103107110.html')
        
    def parse(self,response):
        print "enter in parse"
        items = []
        hxs = HtmlXPathSelector(response)
        urls = hxs.select('//div[@class="srchContent"]/ul/li/p[@class="pic"]/a/@href').extract()
        #print 'ggggggggggggggg',urls
        items.extend(self.make_requests_from_url(url).replace(callback=self.main_page)
                    for url in urls)
        return items
            
    def main_page(self,response):
        print 'enter in main' 
        my_dict = {} 
        hxs = HtmlXPathSelector(response)
        url = hxs.select('//div[@class="crumb"]/a/@href').extract()[4]
        price = hxs.select('//p[@class="pActions clearfix"]/a[@class="installment"]/text()').extract()
        print price ,'price'
        print url,'urlurl'
        datas = hxs.select('//div[@id="F2"]/div[@class="bd pData"]/table/tr')
        #tatas = hxs.select('//div[@class = "bd pData"]/table/tbody/tr/td[@class="left"]/text()').extract()
        #print datas,'ffffffffffffffffffffffff'
        for data in datas :
            print 'data' 
            tr = data.select('td/text()').extract()
            
            if len(tr) == 2:
                my_dict[tr[0]] = tr[1]
                #print my_dict,'my_dict[tr[0]] = tr[1]'
            else :
                pass
            #pinpai = u'品牌'
            '''if my_dict[pinpai]:
                print 'my_dict["品牌"]',my_dict[pinpai]
            else:
                print 'none'''
        czxt = u'操作系统'
        print my_dict
        return my_dict
        