# -*- coding:UTF-8 -*-
# coding:utf-8
import scrapy,requests,time

from scrapy_test.settings import SPIDER_URL_TEMPLATE
from scrapy_test.settings import PAGE_NUM
from scrapy_test.settings import PAGE_NUM_END
from scrapy_test.items import ImageItem
import sys
# Python 2-only hack: `reload(sys)` restores the `setdefaultencoding`
# attribute that site.py deletes at startup, then forces the process-wide
# default string encoding to UTF-8 so implicit str<->unicode conversions
# of the scraped Chinese text don't raise UnicodeDecodeError.
# NOTE(review): this is removed in Python 3 and is discouraged even on
# Python 2 (it changes encoding behavior for every module in the process).
reload(sys)
sys.setdefaultencoding('utf-8')

class MeiziSpider(scrapy.Spider):
	name = "mz"
	download_delay = 3
	allowed_domains = ["www.mzitu.com"]
	start_urls = [SPIDER_URL_TEMPLATE % str(x+1) for x in range(PAGE_NUM,PAGE_NUM_END)]
	header = {
		'Host': 'www.mzitu.com',
		'Connection': 'keep-alive',
		'Cache-Control': 'max-age=0',
		'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
		'accept-language': 'zh-CN,zh;q=0.8',
		'referer': 'https://www.mzitu.com/xinggan/',
		'user-agent': "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52"
	}

	cookie={
		'Hm_lvt_dbc355aef238b6c32b43eacbbf161c3c':'1502710792,1503722213,1503736505',
		'Hm_lpvt_dbc355aef238b6c32b43eacbbf161c3c':'1503736508'
	}

	def start_requests(self):
		self.log('>>>>>>>>>>>>>>>>>>')
		start_urls = ['http://www.mzitu.com/japan/page/8/']
		for url in start_urls:
			self.log(url)
			yield scrapy.Request(url=url, callback=self.parse,headers=self.header,cookies=self.cookie)

	def parse(self, response):
		for info in response.css('.postlist')[:1]:
			#l['title']=info.css('img::attr(alt)').extract_first()
			page_url=info.css('a::attr(href)').extract_first()
			print info.css('img::attr(alt)').extract_first()
			print info.css('a::attr(href)').extract_first()
			yield scrapy.Request(url=page_url,callback=self.proccess_page)


	def proccess_page(self,response):
		item = ImageItem()
		item['image_urls']=[response.css('.main-image img::attr(src)').extract_first()]
		item['images']=[response.css('.main-title::text').extract_first()]
		item['referer'] =response.url
		print response.url + '>>>>>' + item['image_urls'][0]
		next_url = response.css('.main-image a::attr(href)').extract_first()
		yield item
		yield scrapy.Request(url=next_url,callback=self.proccess_page)

