#coding=utf-8
import scrapy
import os
import re
import sys
try:
	# Python 2 only: force UTF-8 as the implicit str<->unicode codec so that
	# non-ASCII (Chinese) album titles used in filesystem paths don't raise
	# UnicodeDecodeError when concatenated with byte strings.
	reload(sys)
	sys.setdefaultencoding('utf-8')
except NameError:
	# Python 3: reload() is no longer a builtin and text defaults to UTF-8
	# unicode, so no action is needed.
	pass

'''
To pause and later resume this crawl, run it with a persistent job directory:
scrapy crawl meizi -s JOBDIR=crawls/meizi-1
'''

class MeiziSpider(scrapy.Spider):
	"""Scrape photo albums from www.mzitu.com into meizi/<year>/<month>/<album>/."""
	name = "meizi"

	def start_requests(self):
		"""Entry point: request the archive index page listing every album."""
		urls = [
			"http://www.mzitu.com/all",
		]
		for url in urls:
			yield scrapy.Request(url=url, callback=self.parse_archives)

	def parse_archives(self, response):
		"""Walk the year/month/album archive tree and schedule one request per new album.

		Creates meizi/<year>/<month>/<album>/ directories on disk. An album whose
		directory already exists is assumed to have been scraped on a previous run
		and is skipped (crude resume support, complementing scrapy's JOBDIR).
		"""
		for i, year_div in enumerate(response.css("div.year")):
			year = year_div.css("::text").extract_first()
			year_dir = os.path.join("meizi", year)
			if not os.path.exists(year_dir):
				os.makedirs(year_dir)
			# The i-th ul.archives list belongs to the i-th div.year heading.
			for month_li in response.css("ul.archives")[i].css("li"):
				month = month_li.css("p.month>em::text").extract_first()
				month_dir = os.path.join(year_dir, month)
				if not os.path.exists(month_dir):
					os.makedirs(month_dir)
				# Extract the parallel day/title/href lists once, then zip them;
				# zip truncates to the shortest list instead of raising
				# IndexError when the node counts diverge.
				days = month_li.css("p.url::text").extract()
				titles = month_li.css("p.url>a::text").extract()
				links = month_li.css("p.url>a::attr(href)").extract()
				for day, album_name, album_link in zip(days, titles, links):
					# Strip characters that are illegal in directory names.
					dir_name = re.sub(r"[\/\\\:\*\?\"\<\>\|]", "", day + album_name)
					album_dir = os.path.join(month_dir, dir_name)
					if os.path.exists(album_dir):
						continue  # already scraped on a previous run
					os.mkdir(album_dir)
					yield scrapy.Request(
						url=album_link,
						callback=self.parse_album,
						meta={"save_path": album_dir + os.sep, "end_link": ""},
					)

	def parse_album(self, response):
		"""Save the photo on the current album page and follow the next-page link.

		Recursion ends when this page's URL equals the album's last-page URL
		(carried in meta["end_link"]; empty on the first page, so the first
		page never terminates early).
		"""
		save_path = response.meta["save_path"]
		img_link = response.css(".main-image img::attr(src)").extract_first()
		next_link = response.css(".main-image a::attr(href)").extract_first()
		# The second-to-last .pagenavi anchor points at the album's final page
		# (the last anchor is the "next" button). Guard against short lists so
		# a malformed page can't raise IndexError.
		nav_links = response.css(".pagenavi a::attr(href)").extract()
		end_link = nav_links[-2] if len(nav_links) >= 2 else response.url
		if img_link is not None:
			yield scrapy.Request(img_link, callback=self.download_photo,
				meta={"save_path": save_path, "end_link": end_link})
		if response.url == response.meta["end_link"]:
			print("finish album " + save_path)
			return
		# Only follow the next page when the anchor actually exists;
		# scrapy.Request(None) would raise.
		if next_link is not None:
			yield scrapy.Request(next_link, callback=self.parse_album,
				meta={"save_path": save_path, "end_link": end_link})

	def download_photo(self, response):
		"""Write the downloaded image bytes to save_path/<basename of the URL>."""
		save_path = response.meta["save_path"]
		file_name = response.url.split("/")[-1]
		with open(os.path.join(save_path, file_name), "wb") as f:
			f.write(response.body)
		print("finish scratching " + save_path + file_name)
		return
