﻿# -*- coding:UTF-8 -*-
# coding:utf-8
import scrapy,requests,time

import os
import re
from scrapy_test.settings import IMAGES_STORE

from scrapy_test.items import ImageItem
import sys
# Python 2 only: site.py deletes sys.setdefaultencoding at startup, so the
# module must be reload()-ed to get it back. Forcing the default codec to
# UTF-8 avoids UnicodeDecodeError on implicit str<->unicode conversions of
# the scraped Chinese page titles. (Unnecessary and invalid on Python 3.)
reload(sys)
sys.setdefaultencoding('utf-8')

# Extract all photos from a listing page and from every page of each gallery it links to.
# Example listing page: https://www.meitulu.com/rihan/10.html


class MeiTuluSpider(scrapy.Spider):
	"""Crawl meitulu.com galleries and yield one ImageItem per photo.

	Request flow:
	  start_requests -> get_url_page  (tag listing page, collects gallery ids)
	                 -> get_urls      (first page of a gallery, discovers pagination)
	                 -> parse         (every page of a gallery, yields ImageItems)
	"""
	name = "mt"
	download_delay = 3
	allowed_domains = ["www.meitulu.com"]
	# Deprecated scratch attributes, kept only for backward compatibility.
	# The callbacks below no longer mutate shared class-level state: the old
	# code appended to these lists from every callback, so page URLs of one
	# gallery were re-issued while crawling the next (and `list` shadowed the
	# builtin). All per-gallery state now lives in locals and request meta.
	list = []
	start_urls = []
	a = []


	header = {
		'DNT':'1',
		'Host': 'www.meitulu.com',
		'Connection': 'keep-alive',
		'Cache-Control': 'max-age=0',
		'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
		'accept-language': 'zh-CN,zh;q=0.8',
		'Referer': 'https://www.meitulu.com/item/18097.html',
		'User-agent': "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
		"folder":""
	}

	cookie={
		'CNZZDATA1255357127':'68952563-1550739646-https%253A%252F%252Fwww.baidu.com%252F%7C1563323646',
		'UM_distinctid':'1690f62862a859-0d95c83f05d4e4-1333063-1fa400-1690f62862b526'
	}

	def start_requests(self):
		# Single entry point: the tag listing page for the model.
		yield scrapy.Request(
			url="https://www.meitulu.com/t/yumi-sugimoto/",
			callback=self.get_url_page,
			headers=self.header,
			cookies=self.cookie,
		)

	def get_url_page(self, response):
		"""Extract every gallery id from the listing page and request its first page."""
		for href in response.css('.img .p_title a::attr(href)').extract():
			# hrefs look like https://www.meitulu.com/item/12345.html;
			# the first run of digits is the gallery id.
			match = re.search(r"\d+", href)
			if match is None:
				continue
			gallery_id = match.group()
			url = "https://www.meitulu.com/item/{0}.html".format(gallery_id)
			self.log('Image Page URL=' + url)
			yield scrapy.Request(
				url=url,
				callback=self.get_urls,
				headers=self.header,
				cookies=self.cookie,
				meta={"number": gallery_id},
			)

	def get_urls(self, response):
		"""From a gallery's first page, create its folder and request every page."""
		folder = "F:\\pic\\yumi-sugimoto\\" + response.css('.weizhi h1::text').extract_first()
		self.log(folder)
		try:
			# makedirs may race with another callback creating the same
			# folder between a check and the create; treat "already exists"
			# as success instead of crashing (no exist_ok on Python 2).
			os.makedirs(folder)
		except OSError:
			if not os.path.isdir(folder):
				raise
		pages = response.css('#pages a::text').extract()
		# The pager's last entry is the "next" link; the one before it holds
		# the highest page number of this gallery.
		max_page = int(pages[-2])
		self.log("max page=%d" % max_page)
		# Page 1 is the response we are on; pages 2..max_page follow the
		# item/<id>_<page>.html naming scheme.
		page_urls = [response.url] + [
			'https://www.meitulu.com/item/%s_%d.html' % (response.meta['number'], page)
			for page in range(2, max_page + 1)
		]
		for url in page_urls:
			self.log('Every Image Page URL=' + url)
			yield scrapy.Request(
				url=url,
				callback=self.parse,
				headers=self.header,
				cookies=self.cookie,
				meta={'folder': folder},
			)

	def parse(self, response):
		"""Yield an ImageItem for every photo on one gallery page."""
		self.log(response.css('.weizhi h1::text').extract_first())
		for info in response.css('.content img'):
			src = info.css('img::attr(src)').extract_first()
			if not src:
				# Skip decorative <img> tags without a src attribute.
				continue
			item = ImageItem()
			item['image_urls'] = [src]
			item['images'] = [src]
			item['referer'] = self.header['Referer']
			item['folder'] = response.meta['folder']
			yield item

	def proccess_page(self, response):
		"""Alternative crawl: follow the main image's "next" link page by page.

		Currently unreferenced (the request that used it is commented out);
		kept, with its original misspelled name, for backward compatibility.
		"""
		src = response.css('.main-image img::attr(src)').extract_first()
		item = ImageItem()
		# BUG FIX: image_urls must be a list — the old code stored the bare
		# string, so item['image_urls'][0] was its first character.
		item['image_urls'] = [src]
		item['images'] = [response.css('.main-title::text').extract_first()]
		# BUG FIX: the field is 'referer' (as used in parse); 'Referer' is
		# not a declared ImageItem field and raised KeyError.
		item['referer'] = response.url
		yield item
		next_url = response.css('.main-image a::attr(href)').extract_first()
		if next_url:
			# Stop cleanly on the last page instead of requesting None.
			yield scrapy.Request(url=next_url, callback=self.proccess_page)

	def save_image(self, url, referer_url):
		"""Synchronously download one image via requests (fallback helper).

		Side effect: rewrites self.header['Referer'] for the download.
		"""
		self.header['Referer'] = referer_url
		try:
			self.log('saving pic url=' + url)
			rsq = requests.get(url, headers=self.header, cookies=self.cookie)
			# with-statement guarantees the handle is closed even when the
			# write raises (the old code leaked it on error).
			with open('D://jpg//' + str(time.time()) + ".jpg", 'wb') as f:
				f.write(rsq.content)
		except Exception as err:
			# Best-effort download: log and continue, as before.
			self.log(repr(err))




class transCookie:
	"""Convert a raw "Cookie:" header string into a dict for scrapy/requests."""

	def __init__(self, cookie):
		# Raw cookie header value, e.g. "a=1; b=2".
		self.cookie = cookie

	def stringToDict(self):
		"""Parse self.cookie into a {name: value} dict.

		Pairs are split on ';'. Within a pair, only the FIRST '=' separates
		name from value, so values that themselves contain '=' (base64 or
		signed cookies) are preserved intact — the old code split on every
		'=' and truncated such values. Fragments without '=' (e.g. after a
		trailing ';') are skipped instead of raising IndexError.
		"""
		itemDict = {}
		for item in self.cookie.split(';'):
			if '=' not in item:
				# Empty or malformed fragment — nothing to store.
				continue
			key, value = item.split('=', 1)
			itemDict[key.replace(' ', '')] = value
		return itemDict


