# -*- coding:UTF-8 -*-
# coding:utf-8
import scrapy,requests,time
from scrapy_test.items import ScrapyTestItem
from scrapy_test.settings import SPIDER_URL_TEMPLATE
from scrapy_test.settings import PAGE_NUM
from scrapy_test.settings import PAGE_NUM_END

import sys
reload(sys)
sys.setdefaultencoding('utf-8')



class ComicSpider(scrapy.Spider):
    """Spider for www.mzitu.com: walks the list pages built from the settings
    constants, follows each gallery link, and downloads the gallery's main
    image to the local disk via `requests`.
    """

    name = "images"
    download_delay = 3  # seconds between requests; be polite to the server
    allowed_domains = ["www.mzitu.com"]
    # One list-page URL per page number in [PAGE_NUM, PAGE_NUM_END).
    start_urls = [SPIDER_URL_TEMPLATE % str(x + 1)
                  for x in range(PAGE_NUM, PAGE_NUM_END)]

    # Browser-like headers; the site appears to reject bare requests,
    # so a Referer and User-Agent are spoofed.
    header = {
        'Host': 'www.mzitu.com',
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'accept-language': 'zh-CN,zh;q=0.8',
        'referer': 'https://www.mzitu.com/xinggan/',
        'user-agent': "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52"
    }

    # Session cookie captured from a browser session; presumably expires —
    # TODO(review): confirm these values are still accepted by the site.
    cookie = {
        'Hm_lvt_dbc355aef238b6c32b43eacbbf161c3c': '1502710792,1503722213,1503736505',
        'Hm_lpvt_dbc355aef238b6c32b43eacbbf161c3c': '1503736508'
    }

    def start_requests(self):
        """Yield one request per list page, carrying the spoofed
        headers and cookies."""
        for url in self.start_urls:
            self.logger.debug("requesting list page: %s", url)
            yield scrapy.Request(url=url, callback=self.parse,
                                 headers=self.header, cookies=self.cookie)

    def parse(self, response):
        """Parse a list page and follow the first gallery block found.

        NOTE: the `[:1]` slice (kept from the original) limits crawling to
        the first `.postlist` element on the page.
        """
        for info in response.css('.postlist')[:1]:
            page_url = info.css('a::attr(href)').extract_first()
            self.logger.debug("gallery %s -> %s",
                              info.css('img::attr(alt)').extract_first(),
                              page_url)
            # extract_first() returns None when nothing matches; guard so we
            # never schedule a request with url=None.
            if page_url:
                yield scrapy.Request(url=page_url, callback=self.proccess_page)

    def proccess_page(self, response):
        """Extract the main image of a gallery page and download it.

        (Method name typo kept: it is referenced as a callback and may be
        referenced by external code.)
        """
        item = ScrapyTestItem()
        item['link'] = response.css('.main-image img::attr(src)').extract_first()
        item['title'] = response.css('.main-title::text').extract_first()
        # NOTE(review): Scrapy's FilesPipeline expects `file_urls` to be a
        # list of URLs, not a single string; kept as-is because the item is
        # never yielded yet — fix before enabling a files pipeline.
        item['file_urls'] = response.url
        self.logger.debug("%s >>>>> %s", item['link'], item['title'])
        # BUG FIX: the original called self.save_image(url, ...) with an
        # undefined name `url` (NameError at runtime); the image URL
        # extracted above is item['link'].
        if item['link']:
            self.save_image(item['link'], response.url)
        # TODO: follow '.main-image a::attr(href)' for in-gallery pagination
        # and yield the item (both were commented out in the original).

    def save_image(self, url, referer_url):
        """Download one image with `requests` and write it under D:/jpg/.

        :param url: absolute URL of the image to fetch
        :param referer_url: gallery page URL, sent as the Referer header
                            (the site checks it against hotlinking)
        """
        # BUG FIX: the original mutated an undefined `headers2` dict
        # (NameError). Build the request headers from a copy of the spider's
        # browser headers so the class attribute is never mutated.
        headers = dict(self.header)
        headers['Referer'] = referer_url
        rsq = requests.get(url, headers=headers, cookies=self.cookie)
        # time.time() as a filename is collision-prone but preserved from the
        # original; `with` guarantees the handle is closed on write errors.
        with open('D://jpg//' + str(time.time()) + ".jpg", 'wb') as f:
            f.write(rsq.content)
