#!/usr/bin/python
# encoding=utf-8
# cosine 2010-05

"""获取网页基本信息
使用方法，看该程序的test样例
"""

import urllib2

class HttpFetch:
	"""Fetch a URL over HTTP and expose the response body, headers,
	status code, final URL and error message.

	Typical usage is shown in test_fetch()/test() at the bottom of
	this module.
	"""
	def __init__(self):
		# Optional urllib2 opener; when None, urllib2.urlopen() is used.
		self.http_opener = None
		
		# Request state
		self.url = None
		self.req_headers_dict = {} # {'user-agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)'}
		self.req_headers_list = [] # [('user-agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')]
		
		# Response state, populated by request()
		self.html = None     # response body
		self.headers = None  # response headers as a plain dict
		self.code = None     # HTTP status code
		self.trueurl = None  # final URL after redirects
		self.error = None    # error message of the last failed request
		# Initialized here so reading it before any request() call does
		# not raise AttributeError (previously only set inside request()).
		self.broken = False
		
	def set_http_opener(self,opener):
		"""Set a custom urllib2 opener used for subsequent requests."""
		self.http_opener = opener
		
	def set_headers(self,**headers):
		"""Set HTTP request headers from keyword arguments.

		Underscores in keyword names become dashes, e.g.
		user_agent='...' sets the 'user-agent' header.
		"""
		for key, value in headers.items():
			self.__set_header(key, value)

	def __set_header(self,key,value):
		"""Normalize and store a single header; empty values are ignored."""
		if not value:
			return
		value = value.strip()
		key = key.strip().replace('_','-')
		# Accept both 'cookie' and 'cookies' spellings.
		if key.lower() in ('cookies','cookie'):
			key = 'cookie'
		if value:
			self.req_headers_dict[key] = value
			self.req_headers_list.append((key, value))
		
	def request(self,url,method='GET',post_data=None,req_headers=None):
		"""Issue an HTTP request and record the response on self.

		url: target URL
		method: 'GET' or 'POST' (case-insensitive)
		post_data: urlencoded payload such as 'a=1&b=2'; for GET it is
			appended to the query string instead of being sent as a body
		req_headers: optional dict of extra headers for this request
			(replaces the previous mutable default argument {})
		"""
		self.url = url
		method = method.upper()
		if method not in ('GET','POST'):
			print('ksspider request error: method %s not in ("GET","POST")'%method)
			return
		if method == 'GET' and post_data:
			# Fold the payload into the query string for GET requests.
			if '?' in url:
				self.url = url + '&' + post_data
			else:
				self.url = url + '?' + post_data
		try:
			# Merge per-request headers into the stored header set.
			if req_headers:
				for key, value in req_headers.items():
					self.__set_header(key, value)

			if self.http_opener:
				self.http_opener.addheaders = self.req_headers_list
				if method == 'GET':
					u = self.http_opener.open(self.url)
				else: # POST (method already validated above)
					u = self.http_opener.open(self.url,post_data)
			else:
				if method == 'GET':
					req = urllib2.Request(self.url,headers=self.req_headers_dict)
				else: # POST
					req = urllib2.Request(self.url,data=post_data,headers=self.req_headers_dict)
				u = urllib2.urlopen(req)
			try:
				self.html = u.read()
				self.headers = u.headers.dict
				self.code = u.code
				self.trueurl = u.url
			finally:
				u.close() # do not leak the connection
			# Clear any error left over from a previous failed request.
			self.error = None
			self.broken = False
		except Exception as e:
			# urllib2.HTTPError instances still carry a body/headers/code;
			# other errors (e.g. DNS failure) do not, so fall back to
			# safe defaults.
			try:
				self.html = e.read()
			except Exception:
				self.html = ''
			try:
				self.headers = e.headers.dict
			except Exception:
				self.headers = {}
			self.error = str(e)
			try:
				self.code = e.code
			except Exception:
				self.code = 404 # no real status code available
			self.trueurl = self.url
			self.broken = True
		
	def get_html(self):
		"""Return the response body (None before any request)."""
		return self.html
	
	def get_headers(self):
		"""Return the response headers dict."""
		return self.headers
	
	def get_code(self):
		"""Return the HTTP status code."""
		return self.code
	
	def get_trueurl(self):
		"""Return the real (post-redirect) URL."""
		return self.trueurl
	
	def get_error(self):
		"""Return the error message of the last failed request."""
		return self.error
	
def test_fetch(url,referer='',user_agent='',cookies=''):
	"""Demo helper: fetch `url` with the given headers and print a
	one-shot summary of the response."""
	fetcher = HttpFetch()            # build the fetcher
	fetcher.set_http_opener(None)    # no custom opener for this demo
	fetcher.set_headers(referer=referer,user_agent=user_agent,cookies=cookies)
	fetcher.request(url)             # fire the request
	
	summary = (
		fetcher.get_code(),
		fetcher.get_trueurl(),
		fetcher.get_headers(),
		fetcher.get_error(),
		len(fetcher.get_html()),
	)
	print('code: %s\ntrueurl: %s\nheaders: %s\nerror: %s\nlen(html):%s\n'%summary)
	
def test():
	"""Exercise test_fetch() with one reachable URL and one that must
	fail DNS resolution."""
	agent = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)'
	print('>> success case')
	test_fetch('http://www.baidu.com',user_agent=agent)
	print('>> failure case')
	test_fetch('http://www.*.com',user_agent=agent)
	
if __name__ == '__main__':
	test()
	
	"""
	如果test()正常的话，应该输出如下结果：
	
	>> success case
	code: 200
	trueurl: http://www.baidu.com
	headers: {'content-length': '6222', 'set-cookie': 'BAIDUID=02D77503EB4A11D426B3EA53007D0EE3:FG=1; expires=Wed, 24-Nov-40 03:47:43 GMT; path=/; domain=.baidu.com', 'expires': 'Wed, 24 Nov 2010 03:47:43 GMT', 'server': 'BWS/1.0', 'connection': 'Close', 'cache-control': 'private', 'date': 'Wed, 24 Nov 2010 03:47:43 GMT', 'p3p': 'CP=" OTI DSP COR IVA OUR IND COM "', 'content-type': 'text/html;charset=gb2312'}
	error: None
	len(html):6222
	
	>> failure case
	code: 404
	trueurl: http://www.*.com
	headers: {}
	error: <urlopen error [Errno 11004] getaddrinfo failed>
	len(html):0

	"""
	
