﻿from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from zouxiu.items import ZouxiuItem1
from zouxiu.items import ZouxiuItem2
from scrapy.contrib.loader import XPathItemLoader
from scrapy.http import Request
from scrapy import log
import time
import re

class ZouXiuSpider1(BaseSpider):
    """Scrape brand name/link pairs from the xiu.com index page.

    Emits one ZouxiuItem1 per outbound brand link found inside the
    ``div.bd`` container, stamped with the current date.
    """
    name = "zouxiu1"
    allowed_domains = ["www.xiu.com"]
    start_urls = [
        "http://www.xiu.com/index_0.shtml"
    ]

    def parse(self, response):
        """Yield a loaded ZouxiuItem1 for every brand anchor on the page."""
        # Current date as "YYYY/M/D" — deliberately no zero padding, to
        # match the format previously built via str() concatenation.
        t = time.localtime()
        time_now = '%d/%d/%d' % (t.tm_year, t.tm_mon, t.tm_mday)
        hxs = HtmlXPathSelector(response)
        # Single XPath equivalent to the old chained .select() calls:
        # every target="_blank" anchor under div.bd.
        links = hxs.select('/html/body//div[@class="bd"]//a[contains(@target,"_blank")]')
        for link in links:
            loader = XPathItemLoader(item=ZouxiuItem1(), selector=link)
            # Anchor text lives in a nested <span>; normalize-space trims it.
            loader.add_xpath('name', 'normalize-space(./span)')
            loader.add_xpath('url', '@href')
            loader.add_value('time', time_now)
            yield loader.load_item()

class ZouXiuSpider2(BaseSpider):
    """Crawl xiu.com brand pages, walk their paginated product listings,
    and save every product detail page to disk as a numbered HTML file.

    Pipeline: parse (index) -> parse_list (brand page, discovers page
    count) -> parse_page (one listing page) -> parse_item (detail page).
    """
    name = "zouxiu2"
    allowed_domains = ["xiu.com"]
    start_urls = [
        "http://www.xiu.com/index_0.shtml"
    ]
    DOWNLOAD_DELAY = 1
    # Class-wide running counter used to number the saved output files.
    id_output = 0

    # Regexes hoisted to class level so they are compiled once, not per
    # response. Dots escaped so '.' no longer matches any character.
    _BRAND_URL_RE = re.compile(r'http://brand\.xiu\.com/(\d+)\.html')
    # u'\u5171(\d+)\u9875' is the text "共N页" ("N pages in total").
    # Plain unicode literal (not ur'') so it parses under Python 3 too.
    _PAGE_COUNT_RE = re.compile(u'\u5171(\\d+)\u9875')

    def parse(self, response):
        """Follow every brand link found on the index page."""
        hxs = HtmlXPathSelector(response)
        hrefs = hxs.select('/html/body//div[@class="bd"]//a[contains(@target,"_blank")]/@href')
        for href in hrefs:
            yield Request(href.extract(), callback=self.parse_list)

    def parse_list(self, response):
        """Extract the brand id and page count, then request each listing page."""
        match = self._BRAND_URL_RE.match(response.url)
        if not match:
            # Unexpected URL shape: skip instead of raising NameError on
            # the unbound band_id below (original bug).
            self.log("parse_list: unexpected brand URL %s" % response.url)
            return
        band_id = match.group(1)  # brand id

        hxs = HtmlXPathSelector(response)
        page_text = hxs.select('/html/body//div[@class="page_div page_noe"]//li[@class="pageTo"]/text()')[0].extract().strip()
        match = self._PAGE_COUNT_RE.match(page_text)
        if not match:
            # No "共N页" marker: skip instead of UnboundLocalError on page_num.
            self.log("parse_list: page count not found for brand %s" % band_id)
            return
        page_num = int(match.group(1))  # number of listing pages

        # Listing shows 40 products per page; beginIndex is the offset.
        for page in range(page_num):
            url_page = ('http://list.xiu.com/00000.html?searchTerm=&pageView=image'
                        '&minPrice=&maxPrice=&metaData=&facet=&orderBy=6'
                        '&instalment=&brandid=%s&beginIndex=%d' % (band_id, 40 * page))
            yield Request(url_page, callback=self.parse_page)

    def parse_page(self, response):
        """Request every product detail page linked from one listing page."""
        self.log(response.url)  # replaces py2-only debug 'print'
        hxs = HtmlXPathSelector(response)
        for href in hxs.select('//a[@class="img_a"]/@href'):
            yield Request(href.extract(), callback=self.parse_item)

    def parse_item(self, response):
        """Save the raw response body to a sequentially-numbered file."""
        ZouXiuSpider2.id_output += 1
        filename = "C:/down/%d.html" % ZouXiuSpider2.id_output
        # 'with' guarantees the handle is closed; the original leaked it
        # via open(...).write(...).
        with open(filename, 'wb') as f:
            f.write(response.body)
