# -*- coding: utf-8 -*-
import scrapy
import time
import hashlib
import json
from medlive.items import MedliveItem

import urllib
import urllib2
# import urllib2
import requests 
import os
import sys
# Python 2 only: setdefaultencoding() is deleted from sys during startup,
# so re-expose it via reload() and make implicit str<->unicode conversions
# default to UTF-8 (the scraped pages and file names are Chinese).
reload(sys)
sys.setdefaultencoding('utf8')

class MedliveSpider(scrapy.Spider):
	"""Crawl guideline listings on guide.medlive.cn and download the
	attached guideline files into the local ``download/`` directory.

	Flow: start_requests -> parse_list -> parse_ajax (paginated POSTs to
	the load_more AJAX endpoint) -> parse_download (fetches each file with
	the ``requests`` library, bypassing Scrapy's downloader so redirects
	can be followed manually).
	"""
	name = "medlive"
	allowed_domains = ['guide.medlive.cn']
	start_urls = ['http://guide.medlive.cn']

	# Local directory the downloaded guideline files are written under.
	file_path = 'download'

	# Browser-like headers for every request; 'Referer' is mutated per
	# request in the callbacks below.
	headers = {
		"Proxy-Connection": "keep-alive",
		"Pragma": "no-cache",
		"Cache-Control": "max-age=0",
		"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
		"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
		"DNT": "1",
		"ETag": "M-c9e7035bf8c834df6e397d1fb9b01dc7",
		"Vary": "Accept-Encoding",
		"Connection": "keep-alive",
		"Accept-Encoding": "gzip, deflate",
		"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
		"Referer": 'http://guide.medlive.cn/guideline/list',
		"Accept-Charset": "utf-8;q=0.7,utf-8;q=0.7,*;q=0.7",
		"Cookie": "ymt_pk_id=b2a73445f18ab33f; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22167bad2c16a1b3-028a573b7c87b-6313363-2359296-167bad2c16bdce%22%2C%22%24device_id%22%3A%22168506ab32845b-0f307f61a19a1c-b781636-2359296-168506ab3298d5%22%7D; Hm_lvt_62d92d99f7c1e7a31a11759de376479f=1547524385; sess=ST-537635-4hdceT7AzdUnj0wZwGJq-cas; guide_app_pop=Y; ymtinfo=eyJ1aWQiOiIzNjI4ODUwIiwicmVzb3VyY2UiOiIiLCJhcHBfbmFtZSI6IiIsImV4dF92ZXJzaW9uIjoiMSJ9; _pk_ref.3.a971=%5B%22%22%2C%22%22%2C1549463048%2C%22http%3A%2F%2Fguide.medlive.cn%2Fguidelinesub%2F4671%22%5D; _pk_ses.3.a971=*; Hm_lpvt_62d92d99f7c1e7a31a11759de376479f=1549463056; _pk_id.3.a971=b2a73445f18ab33f.1545027631.26.1549463056.1549208525.",
		"Upgrade-Insecure-Requests": "1"

		# "Host": "guide.medlive.cn"
	}
	# Session cookie parsed into a dict in __init__ (scrapy.Request wants
	# cookies as a mapping, not a header string).
	cookie = {}

	def __init__(self):
		super(MedliveSpider, self).__init__()
		# Same hard-coded session cookie as self.headers['Cookie'];
		# NOTE(review): this session expires — refresh before running.
		cookie = 'ymt_pk_id=b2a73445f18ab33f; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22167bad2c16a1b3-028a573b7c87b-6313363-2359296-167bad2c16bdce%22%2C%22%24device_id%22%3A%22168506ab32845b-0f307f61a19a1c-b781636-2359296-168506ab3298d5%22%7D; Hm_lvt_62d92d99f7c1e7a31a11759de376479f=1547524385; sess=ST-537635-4hdceT7AzdUnj0wZwGJq-cas; guide_app_pop=Y; ymtinfo=eyJ1aWQiOiIzNjI4ODUwIiwicmVzb3VyY2UiOiIiLCJhcHBfbmFtZSI6IiIsImV4dF92ZXJzaW9uIjoiMSJ9; _pk_ref.3.a971=%5B%22%22%2C%22%22%2C1549463048%2C%22http%3A%2F%2Fguide.medlive.cn%2Fguidelinesub%2F4671%22%5D; _pk_ses.3.a971=*; Hm_lpvt_62d92d99f7c1e7a31a11759de376479f=1549463056; _pk_id.3.a971=b2a73445f18ab33f.1545027631.26.1549463056.1549208525.'
		self.cookie = self.stringToDic(cookie)

	def start_requests(self):
		"""Hit the member portal first so the session cookie is exercised
		before the guideline pages are requested."""
		url = 'http://i.medlive.cn/'
		print(self.cookie)
		request = scrapy.Request(
			url = url,
			headers = self.headers,
			cookies = self.cookie,
			method = 'GET',  # HTTP verbs are conventionally upper-case
			callback = self.parse_list,
			dont_filter = True
		)
		yield request

	def parse_login(self, response):
		"""Debug callback: print the URL we landed on after login."""
		print('response.url')
		print(response.url)

	def parse_list(self, response):
		"""Hop to the guideline list page, then start paging its AJAX feed."""
		url = 'http://guide.medlive.cn/guideline/list'
		request = scrapy.Request(url, headers = self.headers, cookies = self.cookie, callback=self.parse_ajax, dont_filter = True)
		yield request

	def parse_ajax(self, response):
		"""Page through the load_more AJAX endpoint, one POST per page.

		NOTE(review): the nested ranges make currentPage overlap between
		outer iterations (j=1510,i=2 and j=1511,i=1 both give 1512), and
		dont_filter=True means those duplicates are actually fetched.
		Preserved as-is pending confirmation of the intended page window.
		"""
		self.headers['Referer'] = 'http://guide.medlive.cn/guideline/list'
		totalPage = 1638
		for j in range(1510, totalPage):
			time.sleep(30)  # long pause between outer batches to avoid bans
			pages = 20
			link = 'http://guide.medlive.cn/ajax/load_more.ajax.php'
			for i in range(1, pages):
				currentPage = i + j
				time.sleep(2)
				request = scrapy.FormRequest(
					url = link,
					headers = self.headers,
					method = 'POST',
					cookies = self.cookie,
					meta = {'page':currentPage},
					formdata = {
						'page':str(currentPage),
						'sort':'publish'
					},
					callback = self.parse_download,
					dont_filter = True
				)
				yield request

	def _log_error(self, gid):
		"""Append a guideline id to the local 'error' log, comma-separated.

		Uses append mode ('a') rather than the previous 'wb', which
		truncated the log on every call, and coerces the id with str()
		because JSON may deliver it as an int.
		"""
		with open('error', 'a') as errlog:
			errlog.write(str(gid) + ',')

	def parse_download(self, response):
		"""For each entry in the AJAX payload, resolve the download.php
		redirect and save the file under download/<guideline id>/."""
		datas = json.loads(response.body)['data_list']

		for data in datas:
			time.sleep(2)
			gid = data['id']
			self.headers['Referer'] = 'http://guide.medlive.cn/guideline/' + str(gid)
			if len(data['web_file_info']) > 0:
				# Parse the embedded file-info JSON once (was parsed 3x).
				info = json.loads(data['web_file_info'])[0]
				f = info['id']
				k = info['sk']
				n = info['fn'].replace(' ', '+')  # server expects '+' for spaces
				filePath = self.file_path + '/' + str(gid)
				if not os.path.exists(filePath):
					# makedirs (not mkdir): also creates 'download/' itself
					# on the very first run.
					os.makedirs(filePath)
				downloadFile = filePath + '/' + n
				url = ('http://guide.medlive.cn/guideline/download.php?f=' + str(f) +
					'&n=' + n.encode('utf-8') + '&k=' + k +
					'&g=' + str(gid) + '&o=' + str(data['sub_type']))

				# download.php answers with a redirect to the real file
				# host; follow it manually so we control both hops.
				jump = requests.get(url, headers=self.headers, allow_redirects=False)
				location = jump.headers.get('Location')
				if location:
					r = requests.get(location)
					with open(downloadFile, "wb") as code:
						code.write(r.content)
				else:
					# No redirect: the server refused the download
					# (previously this raised KeyError on 'Location').
					self._log_error(gid)
			else:
				self._log_error(gid)

	def parse_download2(self, response):
		"""Alternate downloader that hits the webres temp URL directly
		instead of going through download.php. Not wired as a callback."""
		datas = json.loads(response.body)['data_list']

		for data in datas:
			time.sleep(5)
			gid = data['id']
			self.headers['Referer'] = 'http://guide.medlive.cn/guideline/' + str(gid)
			# Parse the embedded file-info JSON once (was parsed 3x).
			info = json.loads(data['web_file_info'])[0]
			f = info['id']
			n = info['fsn'].replace(' ', '+')
			filePath = self.file_path + '/' + str(gid)
			if not os.path.exists(filePath):
				os.makedirs(filePath)
			downloadFile = filePath + '/' + n
			# NOTE(review): the '/c8/3628850/' path segment looks tied to
			# one user session — confirm before reusing.
			url = 'http://webres.medlive.cn/upload/temp/c8/3628850/' + str(f) + '/' + urllib.quote(n.encode('utf-8'))
			print(url)
			r = requests.get(url)
			# (dropped the debug print of r.content: it dumped the whole
			# binary file to stdout)
			with open(downloadFile, "wb") as code:
				code.write(r.content)

	def parse_jump(self, response):
		"""Save the already-redirected response URL target to the path
		recorded in meta['filePath']. Currently unused (see parse_download)."""
		print('response')
		print(response)
		time.sleep(5)
		filePath = response.meta['filePath']
		url = response.url
		print("url")
		print(url)
		r = requests.get(url)
		with open(filePath, "wb") as code:
			code.write(r.content)

	def stringToDic(self, cookie):
		"""Parse a raw 'k=v; k2=v2' Cookie header string into a dict.

		Splits each fragment on the FIRST '=' only — cookie values often
		contain '=' themselves (base64 padding, url-encoded data), which
		the old split-on-every-'=' silently truncated.
		"""
		itemDict = {}
		for item in cookie.split(';'):
			if '=' not in item:
				continue  # skip malformed / empty fragments
			key, value = item.split('=', 1)
			itemDict[key.strip()] = value
		return itemDict