# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy import Request
from scrapy import Spider
from scrapy import http
from pyquery import PyQuery as pq
import re
from zms.items import ZmsItem
# Destination directory for downloaded images; must end with a path
# separator because file paths are built by plain string concatenation
# (see ZanMeiShiSpider.parse2). NOTE(review): machine-specific absolute
# path — consider making this configurable.
localDir = '/Users/mac/Pictures/zms1/'

class ZanMeiShiSpider(Spider):
    """Crawl zanmeishi.com tab pages and download each page's sheet-music image.

    For every tab page (1..999) the spider extracts the page title and the
    image link next to the ``.notice`` element, then issues a second request
    to fetch the image bytes and saves them under ``localDir``.
    """
    name = "zms"
    allowed_domains = ["zanmeishi.com"]
    # Tab pages are sequentially numbered: /tab/1.html .. /tab/999.html.
    start_urls = ['http://www.zanmeishi.com/tab/%s.html' % i
                  for i in range(1, 1000)]

    def parse(self, response):
        """Parse one tab page and schedule the image download.

        Yields a Request for the image URL, carrying the populated ZmsItem
        through ``meta['item']`` to :meth:`parse2`.
        """
        doc = pq(response.body_as_unicode())
        # Page title looks like "<song> - ..."; keep the first part and
        # replace '/' so it is safe to embed in a filename.
        title = doc('title').text().split('-')[0].strip().replace(r'/', r'|')
        zurl = doc(".notice").next().find('a').attr('href')
        if not zurl:
            # No image link on this page (or layout changed): skip instead
            # of crashing on zurl[...] below with a TypeError.
            self.logger.warning('no image link found on %s', response.url)
            return
        item = ZmsItem()
        item["title"] = title
        item["picUrl"] = zurl
        # The page number from the URL, used as a filename prefix.
        item["index"] = re.findall(r'\d+', response.url)[0]
        # File extension including the leading dot, e.g. ".jpg".
        item["ext"] = zurl[zurl.rfind('.'):]
        yield scrapy.Request(url=zurl, meta={'item': item}, callback=self.parse2)

    def parse2(self, response):
        """Write the downloaded image bytes to localDir as <index>.<title><ext>."""
        item = response.meta['item']
        path = '%s%s.%s%s' % (localDir, item["index"], item["title"], item["ext"])
        with open(path, 'wb') as f:
            f.write(response.body)


