#!/usr/bin/python
# encoding=utf-8
# cosine 2010-05

"""解析网页数据

模块依赖：
依赖kscomm.http下的chardet进行编码检测
"""

import re
import urllib
import socket
import urlparse
import _socket
from struct import unpack
from suffix import *
from linkparse import LinkParse
import tagparse
from kscomm.http import charsetck

class HtmlParse:
	"""Parse html content: extract urls, charset, title, domains etc.

	All methods work on externally supplied html text / http headers.
	The instance keeps no state except the charset cached by
	get_charset (attribute ``charset``).
	"""

	def __init__(self):
		"""Nothing to initialise -- every method is self-contained."""
		pass

	def get_entry_url(self, trueurl, html):
		"""Return the real entry url of the page.

		If the html contains one or more <meta ... url=...> refresh
		tags, the url of the last one wins; otherwise *trueurl* is
		returned unchanged.
		"""
		meta_tags = re.compile(r'(?is)<meta[^<>]*?url\s*=([\d\w://\\.?=&;%-]*)[^<>]*')
		meta_list = meta_tags.findall(html)
		if meta_list:
			return meta_list[-1]
		return trueurl

	def get_suburls(self, url, html, tags=None):
		"""Return the next-level urls of *url*, grouped by tag.

		*tags* accepts any of:
		["a","area","iframe","frame","meta","script","form","img","style"]
		When None/empty, a/area/frame/iframe links are collected.

		Returns {tag: [absolute urls]}; on error the partially filled
		dict is still returned (best effort).
		"""
		html = html.replace('<br/>', '<br />')
		# bug fix: the old signature used tags=[] and appended to it --
		# the default list was shared and mutated across calls.
		if not tags:
			tags = ['a', 'area', 'frame', 'iframe']
		suburls = {}
		for tag in tags:
			suburls[tag] = []

		try:
			urls_dict = {}
			p = LinkParse(tags)
			try:
				# LinkParse may choke on broken html; fall back to the
				# regex-based tagparse extractor.
				p.feed(html)
				urls_dict = p.urls
			except Exception:
				urls_dict = tagparse.get_urls_dict(html, tags)

			for tag in urls_dict:
				for u in set(urls_dict[tag]):
					u = self.get_valid_url(u.strip(), url)
					if u != '' and u not in suburls[tag]:
						suburls[tag].append(u)
			return suburls
		except Exception as e:
			print('get_suburls error: ' + str(e))
			return suburls

	def get_charset(self, headers, html):
		"""Detect and return the page charset; also cached on self.charset."""
		charset = charsetck.check(headers, html)
		self.charset = charset
		return charset

	def get_title(self, headers, html):
		"""Return the page <title> re-encoded to utf-8, or '' if absent.

		NOTE: the whole document is lower-cased before searching, so the
		returned title is lower-case as well (historical behaviour).
		"""
		if not html:
			return ""
		html = html.lower()
		charset = self.get_charset(headers, html)

		btitle = html.find('<title')
		if btitle == -1:
			return ""
		etitle = html.find('</title')
		if etitle == -1:
			# bug fix: an unterminated <title> used to slice nearly the
			# whole document; treat it as missing instead.
			return ""
		title = html[btitle + 7:etitle].strip()
		if html[btitle + 6:btitle + 7] != '>':
			# '<title ...attrs...>': skip up to the closing '>'
			title = title[title.find('>') + 1:]
		if charset == 'utf-8':
			return title
		try:
			title = title.decode(charset).encode('utf-8')
		except Exception:
			# best effort -- keep the raw bytes on a bad/unknown charset
			pass
		return title

	def get_content_type(self, url, headers):
		"""Return the content type of *url*: html/css/js/xml/other.

		The Content-Type header is consulted first; when inconclusive
		the url file extension decides. '' means no header at all.
		"""
		content_type = headers.get('content-type')
		if not content_type:
			return ''

		for key, _type in (('html', 'html'), ('css', 'css'),
				('javascript', 'js'), ('xml', 'xml')):
			if key in content_type:
				return _type

		# header did not help -- fall back to the url extension
		url = url.split('?')[0]
		for ext, _type in (('.html', 'html'), ('.css', 'css'),
				('.js', 'js'), ('.xml', 'xml')):
			if url.endswith(ext):
				return _type
		return 'other'

	def get_ip(self, site):
		"""Resolve the domain of *site* (usually a siteurl) to an IP.

		Returns '' when resolution fails.
		"""
		domain = self.get_domain(site)
		try:
			return socket.gethostbyname(domain)
		except Exception:
			return ""

	def get_decimal_ip(self, ip):
		"""Return the dotted-quad *ip* as an unsigned 32-bit integer."""
		# use the public socket module instead of the private _socket
		return unpack('>L', socket.inet_aton(ip))[0]

	def get_suburl_by_type(self, suburl, url_type):
		"""Return *suburl* formatted by *url_type*.

		url_type 1 keeps the url as-is; 2 reduces it to its siteurl.
		"""
		if url_type == 2:
			suburl = self.get_siteurl(suburl)
		return suburl

	def get_valid_url(self, suburl, parent_url):
		"""Normalise *suburl* against *parent_url*.

		Handles relative/absolute paths, backslash-mangled protocols
		and pseudo protocols; returns '' for anything unusable.
		"""
		suburl = suburl.split('#')[0]
		if suburl.strip() == '':
			return ''
		suburl = urllib.quote(suburl, ':/\\=?,;&#%')
		lowerUrl = suburl.lower()
		if lowerUrl in ('http://', 'https://', '//', '\\/\\/', '/\\/\\',
				'http:\\\\', 'https:\\\\', 'http:\\/\\/', 'http:/\\/\\'):
			return ''
		whatProtocol = self.check_protocol(lowerUrl)
		if whatProtocol in ('PseudoProtocol', 'OtherProtocol'):
			return ''
		if whatProtocol == 'NormalProtocol':
			if lowerUrl.startswith('//'):
				# Protocol-relative url.  Reject degenerate hosts such
				# as '//:'.  bug fix: the old 'or'-chain here was the
				# un-negated form of this check and rejected virtually
				# every valid '//host/...' url.
				host = lowerUrl[2:]
				if len(host) <= 5 or '.' not in host or host.startswith('.') or host.endswith('.'):
					return ''
				return 'http:' + suburl
			if lowerUrl.startswith(('\\/\\/', '/\\/\\')):
				return 'http:' + suburl.replace('\\', '')
			if lowerUrl.startswith(('http:\\/\\/', 'http:/\\/\\')):
				return suburl.replace('\\', '')
			return suburl.replace('\\\\', '//')
		urlDir = self.get_urldir(parent_url)
		# python2.5.x urlparse.urljoin bug fixed by cosine 2011/1/10:
		# urljoin('http://192.168.10.205/aspcheck.asp','?T=HI') drops
		# aspcheck.asp on 2.5.x but keeps it on 2.6.x -- handle the
		# query-only case ourselves.
		if suburl.startswith('?'):
			return parent_url + suburl
		return urlparse.urljoin(urlDir, suburl)

	def get_siteurl(self, url):
		"""Return the siteurl form of *url*: http://www.knownsec.com"""
		domain = self.get_domain(url)
		if self.is_https(url):
			return 'https://' + domain
		return 'http://' + domain

	def get_domain(self, url):
		"""Return the host part of *url*, e.g. www.knownsec.com.

		Plain string surgery: everything after the first '//' and
		before the first of '/', '?' or '#'.
		"""
		url = url.lower()
		head_pos = url.find('//')
		if head_pos != -1:
			url = url[head_pos + 2:]
		for sep in ('/', '?', '#'):
			end_pos = url.find(sep)
			if end_pos != -1:
				return url[:end_pos]
		return url

	def get_rootdomain(self, url):
		"""Return the registrable root domain of *url*:

		www.baidu.com    ==> baidu.com
		www.baidu.com.cn ==> baidu.com.cn

		Returns False for percent-encoded hosts and bare IP addresses.
		"""
		domain = self.get_domain(url).strip()
		if '%' in domain:
			return False
		domain_list = domain.split('.')
		if domain_list[-1].isdigit():
			# last label all digits => ip address, no root domain
			return False
		# double suffixes (.com.cn etc.) keep three labels
		for suffix in domain_suffixs_double:
			if domain.endswith(suffix):
				return '.'.join(domain_list[-3:])
		return '.'.join(domain_list[-2:])

	def get_urldir(self, url):
		"""Return the directory form of *url*:

		http://www.knownsec.com/hi/mal.html ==> http://www.knownsec.com/hi/
		"""
		lowerUrl = url.lower()
		if url.endswith("/"):
			return url
		if lowerUrl.endswith(domain_suffixs) or lowerUrl.endswith(domain_suffixs_double):
			# bare site url without a trailing slash
			return url + '/'
		urlNoParam = lowerUrl.split("?")
		if len(urlNoParam) < 2 and lowerUrl.endswith(file_suffixs):
			return url[:url.rindex("/") + 1]
		if urlNoParam[0].endswith(file_suffixs):
			return url[:url.rindex("/") + 1]
		if urlNoParam[0].endswith("/"):
			return urlNoParam[0]
		urlNoParamSem = lowerUrl.split(";")
		if len(urlNoParamSem) > 1:
			# same rules applied to ';'-separated parameters
			# (the old inner len()<2 branch was unreachable -- removed)
			if urlNoParamSem[0].endswith(file_suffixs):
				return url[:url.rindex("/") + 1]
			if urlNoParamSem[0].endswith("/"):
				return urlNoParamSem[0]
		return url + "/"

	def is_current_dir(self, url, current_dir):
		"""Whether *url* lives under the directory of *current_dir*."""
		return self.get_urldir(current_dir) in self.get_urldir(url)

	def is_outsite(self, suburl, parent_url, flag):
		"""Whether *suburl* is off-site relative to *parent_url*.

		flag:
		True  - strict: site.knownsec.com vs www.knownsec.com is outsite
		False - loose:  those two count as the same site
		"""
		urlDomain = self.get_domain(parent_url)
		suburlDomain = self.get_domain(suburl)
		if not flag:
			# compare only the last two labels; slicing degrades
			# gracefully for single-label hosts (the old explicit
			# [len-2] indexing raised IndexError there)
			urlDomain = '.'.join(urlDomain.split('.')[-2:])
			suburlDomain = '.'.join(suburlDomain.split('.')[-2:])
		return urlDomain != suburlDomain

	def is_https(self, url):
		"""Whether *url* uses the https scheme."""
		return url.lower().startswith('https://')

	def is_suburl_by_scope(self, suburl, parent_url, scope):
		"""Whether *suburl* falls inside crawl *scope* of *parent_url*.

		scope: 1 - same host, 2 - same root domain, 3 - anything,
		4 - same directory.
		"""
		domain = self.get_domain(parent_url)
		if scope == 1 and not self.is_outsite(suburl, domain, True):
			return True
		elif scope == 2 and not self.is_outsite(suburl, domain, False):
			return True
		elif scope == 3:
			return True
		elif scope == 4 and self.is_current_dir(suburl, parent_url):
			return True
		return False

	def is_blackext(self, url):
		"""Whether the url extension is blacklisted (query string ignored)."""
		url = url.split('?')[0]
		return url.endswith(black_file_suffixs)

	def check_protocol(self, url):
		"""Classify the protocol of *url*.

		Returns 'PseudoProtocol', 'NormalProtocol', 'OtherProtocol' or
		None for protocol-less (relative) urls -- get_valid_url relies
		on the None fall-through.
		"""
		if url.startswith(('javascript:', 'vbscript:', 'mailto:', 'ftp:',
				'mms:', 'ldap:', 'about:', 'data:')):
			return 'PseudoProtocol'
		if url.startswith(('http://', 'https://', '//', '\\/\\/', '/\\/\\',
				'http:\\\\', 'https:\\\\', 'http:\\/\\/', 'http:/\\/\\')):
			return 'NormalProtocol'
		if urlparse.urlparse(url)[0] != '':
			return 'OtherProtocol'
		return None

