# -*- coding:utf-8 -*-
import os,sys
import re
import traceback
import time
import urllib
import urllib2
import cookielib
from urllib2 import URLError, HTTPError
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir, os.pardir))
import supeanut_config
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir))
from CommonLib.mylog import *


'''
作者：supeanut
创建时间：2016-12-11 16:06
功能描述：
	url访问工具
	支持get，post参数传递方式
	支持http认证，cookie，http_header，代理等动态设置
历史改动：
	2016-xx-xx: xxxxxx
'''
class UrlAccess:
	'''URL access helper built on urllib2.

	Supports GET/POST parameter passing and dynamic configuration of
	HTTP basic auth, cookies, custom HTTP headers and proxies.
	'''

	def __init__(self):
		'''Initialize timeout, user agent, and empty handler slots.'''
		self.timeout = supeanut_config.URL_OPENER_TIMEOUT
		# BUGFIX: the constant lives in supeanut_config (like
		# URL_OPENER_TIMEOUT above); the bare name raised NameError.
		self.user_agent = supeanut_config.URL_OPENER_USERAGENT
		# headers holds (name, value) tuples, the format urllib2's
		# OpenerDirector.addheaders expects.
		self.headers = []
		self.proxy_handler = None
		self.cookie_handler = None
		self.auth_handler = None
		self.cookie = cookielib.CookieJar()

	def reflash_opener(self):
		'''Reset cookies, handlers and headers back to a pristine state.'''
		self.__init__()

	def set_timeout(self, timeout):
		'''Set the socket timeout (seconds) used by get_response().'''
		self.timeout = timeout

	def add_header(self, header_head, header_info):
		'''Add (or replace) an outgoing HTTP header, e.g. User-Agent, Origin, Referer.'''
		self.del_header(header_head)
		# BUGFIX: was set([head, info]) — unordered and not the
		# (name, value) pair urllib2 expects for addheaders.
		self.headers.append((header_head, header_info))

	def del_header(self, header_head):
		'''Remove a previously added header by name (no-op if absent).'''
		# BUGFIX: compare the header *name* only; the old membership test
		# also matched when a header's value equalled the name being
		# deleted (the bug the original comment warned about).
		self.headers = [h for h in self.headers if h[0] != header_head]

	def init_header(self):
		'''Reset headers so only the configured user agent entry remains.'''
		self.headers = [self.user_agent]

	def set_cookie(self):
		'''Enable the in-memory cookie jar for subsequent requests (lives as long as this object).'''
		self.cookie_handler = urllib2.HTTPCookieProcessor(self.cookie)

	def cancel_cookie(self):
		'''Stop sending/storing cookies; the cached jar is kept.'''
		self.cookie_handler = None

	def del_cookie(self):
		'''Drop all cached cookies by installing a fresh, empty jar.

		NOTE(review): an already-built cookie_handler still references the
		old jar until set_cookie() is called again — confirm intended.
		'''
		self.cookie = cookielib.CookieJar()

	def get_cookie(self):
		'''Return cached cookies as {name: value}, or None when no jar exists.

		The cached cookies are returned whether or not the cookie handler
		is currently active.
		'''
		if self.cookie is None:
			return None
		cookie = {}
		for item in self.cookie:
			cookie[item.name] = item.value
		return cookie

	def set_proxy(self, protocol, host, port, user=None, pswd=None):
		'''Install a proxy used by the next opener_build().

		Example: set_proxy('https', 'www.sina.com.cn', 443, 'cyh', '123456')
		'''
		if not isinstance(port, int):
			port = int(port)
		if user is not None and pswd is not None:
			# BUGFIX: the old format string had four placeholders but five
			# arguments (TypeError at runtime); the scheme also does not
			# belong in the user:pass@host:port authority string.
			proxy_str = "%s:%s@%s:%d" % (user, pswd, host, port)
		else:
			proxy_str = "%s:%d" % (host, port)
		self.proxy_handler = urllib2.ProxyHandler({protocol: proxy_str})

	def cancel_proxy(self):
		'''Stop using any configured proxy.'''
		self.proxy_handler = None

	def set_basehttpauth(self, realm, toplevel_url, user, pswd):
		'''Configure HTTP basic authentication.

		Example: set_basehttpauth("Members", 'http://ridingfantasy.com', 'qwe56', 'PSWD')
		*realm* is dictated by the server's 401 response header.
		'''
		# BUGFIX: the old code called self.del_basehttpauth(), a method
		# that does not exist (AttributeError). Rebinding the handler
		# replaces any previously stored credentials anyway.
		self.auth_handler = urllib2.HTTPBasicAuthHandler()
		self.auth_handler.add_password(realm, toplevel_url, user, pswd)

	def cancel_basehttpauth(self):
		'''Stop sending basic-auth credentials.'''
		self.auth_handler = None

	def opener_build(self):
		'''(Re)build self.opener from the currently configured handlers and headers.

		Called by get_response() before every request.
		'''
		self.opener = urllib2.build_opener()
		if self.auth_handler is not None:
			self.opener.add_handler(self.auth_handler)
		if self.cookie_handler is not None:
			# The jar object itself is kept, so cookies survive rebuilds.
			self.opener.add_handler(self.cookie_handler)
		if self.proxy_handler is not None:
			self.opener.add_handler(self.proxy_handler)
		self.opener.addheaders = self.headers

	def get_response(self, url, method="GET", request_dict=None):
		'''Fetch *url* via the configured opener.

		INPUT: full url, "GET"/"POST", parameter dict (str, utf-8).
		OUTPUT on success: (True, {'code': int, 'url': redirected url,
		'head': dict of response headers, 'body': page string}).
		OUTPUT on failure: (False, error message string).
		'''
		# BUGFIX: avoid the shared mutable default argument ({}).
		if request_dict is None:
			request_dict = {}
		request_str = urllib.urlencode(request_dict)
		self.opener_build()
		try:
			if method == "GET":
				if request_str == "":
					response = self.opener.open(url, timeout=self.timeout)
				else:
					response = self.opener.open(url + '?' + request_str, timeout=self.timeout)
			elif method == "POST":
				response = self.opener.open(url, data=request_str.encode('utf-8'), timeout=self.timeout)
			else:
				return False, 'method(GET,POST) invalid'
		# HTTPError must be caught before URLError (it is a subclass).
		except HTTPError as e:
			return False, 'The server couldn\'t fulfill the request. Error reason: %s' % e.code
		except URLError as e:
			return False, 'We failed to reach a server. Error code:%s' % e.reason
		except Exception:
			# Typically socket timeout; keep the traceback for the caller.
			return False, 'url open error, maybe timeout: %s' % traceback.format_exc()
		try:
			response_body = response.read()
			response_head = response.info()
			response_url = response.geturl()
			response_code = response.getcode()
		except Exception:
			return False, traceback.format_exc()
		head = {}
		for head_key, head_value in response_head.items():
			head[head_key] = head_value
		return True, {'code': response_code, 'url': response_url, 'head': head, 'body': response_body}

	def FantasyRidingCrawler(self):
		'''Crawl member pictures and videos from ridingfantasy.com.

		NOTE(review): the picture loop below is disabled by an immediate
		``break`` (kept from the original); only the video loop runs.
		'''
		log = mylog('FantasyRidingCrawler', None)
		self.set_timeout(2000)
		self.set_cookie()
		self.set_basehttpauth("Members", 'http://ridingfantasy.com', 'qwe565656', '1qw23er4')
		# First hit primes auth/cookies; its result is deliberately ignored.
		self.get_response('http://ridingfantasy.com', 'GET')
		flag, response = self.get_response('http://ridingfantasy.com/members/members.htm', 'GET')
		if flag is False:
			return flag
		main_str = response['body']
		table_re = re.compile(r"<table cellspacing=0 cellpadding=5 border=1 bordercolor=\"#FFCCFF\" width=\"100%\">[\s\S]*?<blockquote>")
		href_re = re.compile(r"(?<=<a href=\"Model-Section)[\s\S]*?(?=\">)")
		pic_onephpto_htm_re = re.compile(r"(?<=[Aa]{1} href=\")[\s\S]*?(?=\">)")
		f_pic_url_re = re.compile(r"(?<=<img src=\")[\s\S]*?(?=\")")
		f_pic_url_uc_re = re.compile(r"(?<=<IMG src=\")[\s\S]*?(?=\")")
		f_pic_sl_re = re.compile(r"s[0-9]+\.jpg")
		table_str = table_re.search(main_str).group(0)
		href_str_list = href_re.findall(table_str)
		# --- pictures ---
		log.info("process img")
		for href_str in href_str_list:
			# NOTE(review): picture crawling intentionally disabled.
			break
			if href_str.find("htm") < 0:
				continue
			log.info("process:%s" % href_str)
			new_url = "http://ridingfantasy.com/members/Model-Section" + href_str
			series_name = href_str.split('/')[1]
			flag, response = self.get_response(new_url, 'GET')
			if flag is False:
				log.error("access failed pic_main:" + new_url)
				continue
			response = response['body']
			pic_onephpto_htm_str_list = pic_onephpto_htm_re.findall(response)
			for pic_onephpto_htm_str in pic_onephpto_htm_str_list:
				if pic_onephpto_htm_str.find('members.htm') >= 0:
					continue
				one_pic_url = new_url[:new_url.find('index.htm')] + pic_onephpto_htm_str
				flag, response = self.get_response(one_pic_url, 'GET')
				if flag is False:
					log.error("access failed pic_one_htm:" + one_pic_url)
					continue
				response = response['body']
				# BUGFIX: findall()'s second argument is *pos*, not flags;
				# passing re.IGNORECASE (==2) silently skipped two chars.
				# Case variants are handled by the two compiled patterns.
				f_pic_urls = f_pic_url_re.findall(response)
				if len(f_pic_urls) == 0:
					f_pic_urls = f_pic_url_uc_re.findall(response)
				if len(f_pic_urls) == 0:
					log.error('find no f_pic_url')
					continue
				# Pick the main image: skip .gif and sNN.jpg thumbnails.
				f_pic_url_real = ""
				for f_pic_url in f_pic_urls:
					if f_pic_url.find(r".gif") >= 0:
						continue
					elif f_pic_sl_re.search(f_pic_url) is not None:
						continue
					else:
						f_pic_url_real = f_pic_url
				if f_pic_url_real == "":
					log.error("find no pic_url:%s" % one_pic_url)
					# BUGFIX: bail out instead of downloading a bogus URL.
					continue
				if f_pic_url_real.find('..') >= 0:
					f_pic_url_real = '/'.join(one_pic_url.split('/')[:-2]) + f_pic_url_real[2:]
				else:
					f_pic_url_real = '/'.join(one_pic_url.split('/')[:-1]) + '/' + f_pic_url_real
				print(f_pic_url_real)
				log.info('download pic:%s' % f_pic_url_real)
				flag, msg = self.get_response(f_pic_url_real, 'GET')
				if flag is False:
					log.error('download pic failed:%s' % f_pic_url_real)
					# BUGFIX: on failure msg is an error string, not a
					# response dict — do not try to write it to disk.
					continue
				file_path = "/mnt/fantasyride/pic/" + series_name + '/'
				if not os.path.exists(file_path):
					os.makedirs(file_path)
				try:
					f = open(file_path + f_pic_url_real.split('/')[-1], 'wb')
					try:
						f.write(msg['body'])
					finally:
						# BUGFIX: always release the handle (was leaked on
						# write failure).
						f.close()
				except Exception:
					log.error("file write failed")
		# --- videos ---
		log.info('process vedio')
		if not os.path.exists("/mnt/fantasyride/vedio/"):
			os.makedirs("/mnt/fantasyride/vedio/")
		for href_str in href_str_list:
			try_count = 0
			if href_str.find("htm") >= 0:
				continue
			new_url = "http://ridingfantasy.com/members/Model-Section" + href_str
			log.info('download vedio:%s' % new_url)
			flag = False
			while flag is False:
				flag, response = self.get_response(new_url, 'GET')
				if flag is False:
					# BUGFIX: the old guard (try_count >= 5 checked before
					# any increment) could never fire, so a permanently
					# failing URL retried forever.
					try_count += 1
					log.error(response)
					if try_count >= 5:
						log.error("download try max")
						break
			if flag is False:
				# Retries exhausted; response holds an error string.
				continue
			try:
				f = open("/mnt/fantasyride/vedio/" + href_str.split('/')[1], 'wb')
				try:
					f.write(response['body'])
				finally:
					f.close()
			except Exception:
				log.error("write file failed")
				continue

	def fangzhenWebAPI(self):
		'''POST a login form to trade.foundersc.com and print the raw result.

		NOTE(review): account number and the posx token are hard-coded
		below — confirm these are test credentials.
		'''
		self.set_timeout(10)
		self.set_cookie()
		request_dict = {'Text1':'31809207', 'hidkjtxt':'1', 'isActive':'1', 'Text3':'12400', 'posx':'8642c94910c47083a6d02cafeea0b53f58c34331940a7adc638ee1b787d7a9b4ee779856dbed4f947c673a0553d59d5016776fa6636df4a0cd94c312f348d7883c2660988672478e330e68cef4d68c870ab688bfe157fe5e313b36c1a36f350c071575869a7401b5785d6a07d916d7083693b4b42993255d743f939eb41e76cc'}
		flag, response = self.get_response('https://trade.foundersc.com/Fzwt_login.aspx', 'POST', request_dict)
		print(flag)
		print(response)

if __name__ == '__main__':
	# Current entry point: run the fangzhen web-API demo only.
	obj = UrlAccess()
	#obj.FantasyRidingCrawler()
	obj.fangzhenWebAPI()
	exit()

	# NOTE(review): everything below is unreachable (exit() above). If it
	# were ever reached it would raise NameError, because `flag` and
	# `response` are only assigned by the commented-out get_response
	# calls further down.
	obj.set_timeout(10)
	obj.set_cookie()
	#obj.cancel_cookie()
	#obj.del_cookie()
	#obj.set_proxy('https', '197.97.146.62', 8080)
	#obj.cancel_proxy()
	#obj.cancel_basehttpauth()
	#obj.add_header('Origin', '')
	#obj.del_header('Origin')
	#obj.init_header()
	#flag, response = obj.get_response('http://hq.sinajs.cn/list=sh000001,sh000300,sz399001,sz399005,sz399006', 'GET')
	#flag, response = obj.get_response('http://vip.stock.finance.sina.com.cn/corp/go.php/vISSUE_ShareBonus/stockid/000541.phtml', 'GET')
	if flag is False:
		print response
	else:
		print response
		print response['code']
		print response['url']
		print response['head']
		print response['body']
