import os,sys,cPickle,time,urllib2,re
import requests
from pybloom import ScalableBloomFilter
from socket import *
import zlib,fcntl
import multiprocessing
from multiprocessing import Queue,JoinableQueue
from urlparse import urlparse,urljoin
import sqlite3

# Python 2 hack: re-import sys to re-expose setdefaultencoding(), then force
# the process-wide default encoding to utf8 so mixed str/unicode ops don't raise.
reload(sys) 
sys.setdefaultencoding('utf8')
# Default HTTP request headers sent with every crawl request (mimics Firefox 3.6).
header={
        "User-Agent":"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13",
        #"User-Agent" = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.13) Gecko/20101206 Ubuntu/10.10 (maverick) Firefox/3.6.13",
        "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language":"zh-cn,zh;q=0.5",
        #"Accept-Encoding":"gzip,deflate,compress",
        "Accept-Charset":"GB2312,utf-8;q=0.7,*;q=0.7",
        "Keep-Alive":"115",
        "Connection":"keep-alive"
    }

MAXLEN=50000 # max content-length in bytes stored by Crawler.get_content; NOTE(review): old comment said "32 Megabyte" but 50000 bytes is ~49 KB -- confirm intended limit
VISITEDURLDB="visited_url.db" # file used to persist the visited-url bloom filter
EMPTY='-1' # sentinel meaning "no value" in crawl records

def worker(inq,doq,finq,no):
	"""Worker process body: crawl queued URLs and persist the results.

	inq  -- queue of URLs to crawl; the sentinel string 'STOP' ends the loop.
	doq  -- queue of newly discovered URLs, handed back to the manager.
	finq -- queue of URLs whose crawl has finished.
	no   -- worker number, used to name this worker's sqlite db file.
	"""
	crawler=Crawler()
	store=DB("crawl",no)
	for url in iter(inq.get, 'STOP'):
		result=crawler.run_crawl(url)
		found=result['urls']
		# persist the page record (flag 0 -> crawl_info table)
		store.db_add_record(result['output'], 0)
		finq.put(url)
		if found==EMPTY:
			continue
		for link in found:
			# persist one edge per discovered link (flag 1 -> crawl_map table)
			record={'src':url,
				'dst':link,
				'time':time.strftime('%Y-%m-%d %H:%M:%S')}
			store.db_add_record(record, 1)
			doq.put(link)
	
def open_port(ip,*port):
	"""Return the subset of *port* that accept a TCP connection on *ip*.

	Each port is probed with a 2-second connect timeout.  The socket is
	always closed via try/finally (the original leaked it if connect_ex
	raised, and swallowed close() errors with a bare except).
	"""
	target_ip = ip
	print(target_ip)
	open_ports=[]
	for p in port:
		s = socket(AF_INET, SOCK_STREAM)
		s.settimeout(2)
		try:
			# connect_ex returns 0 on success instead of raising
			if s.connect_ex((target_ip, p)) == 0:
				open_ports.append(p)
		finally:
			s.close()
	return open_ports

class DB(object):
	"""Buffered sqlite storage for crawl results.

	Rows are accumulated in memory and flushed to the current sqlite
	file in batches of ``buffer_size``.  Buffer slot 1 holds crawl_info
	rows, slot 2 holds crawl_map rows.  When the current file grows past
	``maxsize`` MB, ``db_check_size`` rotates to a new numbered file.
	"""
	def __init__(self,arg,count):
		self.db_base=arg         # file name prefix, e.g. "crawl"
		self.db_count=count      # numeric suffix of the current db file
		self.db_cur=''           # path of the current db file
		self.maxsize=10240       # rotation threshold in MB (getsize >> 20)
		self.buffer={}
		self.buffer[1]=[]        # pending crawl_info tuples
		self.buffer[2]=[]        # pending crawl_map tuples
		self.buffer_len={}
		self.buffer_len[1]=0
		self.buffer_len[2]=0
		self.buffer_size=20      # rows buffered before a flush
		self.init()

	def init(self):
		"""Derive the current db file name and make sure it exists."""
		self.db_cur='%s_%s.db'%(self.db_base,self.db_count)
		self.db_create_file(self.db_cur)

	def db_create_file(self,db):
		"""Create *db* with its tables unless it exists.

		Returns True if the file was already there, False if it was created.
		"""
		if os.path.exists(db):
			print("%s file exists"%db)
			return True
		else:
			f=open(db,"w")
			f.close()
			self.db_create_table(db)
			return False

	def db_check_size(self,db):
		"""Rotate to a new numbered db file when *db* exceeds maxsize MB.

		Returns True when a rotation happened.
		"""
		s=os.path.getsize(db)
		s=s>>20  # bytes -> MB
		if s>=self.maxsize:
			self.db_count=self.db_count+1
			self.db_cur='%s_%s.db'%(self.db_base,self.db_count)
			self.db_create_file(self.db_cur)
			return True
		else:
			return False

	def db_create_table(self,db):
		"""Create the two crawl tables in a fresh db file."""
		conn=sqlite3.connect(db)
		try:
			sql="CREATE TABLE crawl_info(uid INTEGER primary key, url TEXT,redpath TEXT,header TEXT,content TEXT,time TEXT)"
			conn.execute(sql)
			sql="CREATE TABLE crawl_map(uid INTEGER primary key, src TEXT, dst TEXT,time TEXT)"
			conn.execute(sql)
			conn.commit()
		finally:
			conn.close()

	def db_add_record(self,dic,flag):
		"""Buffer one record; flag 0 -> crawl_info dict, anything else -> crawl_map dict.

		Flushes a buffer slot to sqlite once it reaches buffer_size rows.
		"""
		if flag==0:
			self.buffer[1].append((dic['url'],dic['redpath'],dic['header'],dic['content'],dic['time']))
			self.buffer_len[1]=self.buffer_len[1]+1
		else:
			self.buffer[2].append((dic['src'],dic['dst'],dic['time']))
			self.buffer_len[2]=self.buffer_len[2]+1
		if self.buffer_len[1]>=self.buffer_size:
			self._flush(1,"INSERT INTO crawl_info(url,redpath,header,content,time) VALUES (?,?,?,?,?)")
		if self.buffer_len[2]>=self.buffer_size:
			self._flush(2,"INSERT INTO crawl_map(src,dst,time) VALUES (?,?,?)")

	def _flush(self,slot,sql):
		"""Write the buffered rows for *slot* via *sql* and clear the buffer.

		try/finally guarantees the connection is closed even if the insert
		fails (the original duplicated this code and leaked on error).
		"""
		conn=sqlite3.connect(self.db_cur)
		conn.text_factory = str  # store raw bytes without unicode decoding
		try:
			conn.executemany(sql,self.buffer[slot])
			conn.commit()
		finally:
			conn.close()
		self.buffer_len[slot]=0
		del self.buffer[slot][:]

	def db_dump_records(self,records):
		# NOTE(review): dead/broken -- opens "test.csv" read-only, takes and
		# immediately releases an exclusive lock, and never writes *records*.
		# Kept as-is to preserve the interface; finish or remove it.
		fp=open("test.csv")
		fcntl.flock(fp, fcntl.LOCK_EX)
		fcntl.flock(fp, fcntl.LOCK_UN)
		pass



class Manager(object):
	"""Coordinates worker processes and de-duplicates discovered URLs.

	Queues:
	  inq  -- URLs waiting to be crawled by workers.
	  doq  -- URLs discovered by workers, pending de-duplication.
	  finq -- URLs whose crawl has completed (added to the visited filter).
	"""
	def __init__(self):
		self.inq=multiprocessing.Queue()
		self.doq=multiprocessing.Queue()
		self.finq=multiprocessing.Queue()
		# scalable bloom filter: lookups may (rarely) report false
		# positives -- a url wrongly skipped -- but never false negatives
		self.visited=ScalableBloomFilter(mode=ScalableBloomFilter.SMALL_SET_GROWTH)
		self.pool=[]

	def add_worker(self,func,no):
		"""Start *func* as a daemon process wired to the three queues."""
		p=multiprocessing.Process(target=func, args=(self.inq,self.doq,self.finq,no))
		self.pool.append(p)
		p.daemon = True
		p.start()

	def run_manger(self):
		"""Main dispatch loop; never returns.

		Moves finished URLs into the visited filter and feeds unseen
		discovered URLs back to the workers.
		"""
		while True:
			idle=True
			if not self.finq.empty():
				idle=False
				url=self.finq.get()
				self.visited_add(url)
			if not self.doq.empty():
				idle=False
				job=self.doq.get()
				if not self.visited_contain(job):
					self.inq.put(job)
			if idle:
				# the original busy-spun at 100% CPU when both queues
				# were empty; a short sleep keeps latency low and CPU idle
				time.sleep(0.05)

	def wait(self):
		"""Block until every worker process exits."""
		for t in self.pool:
			t.join()

	def visited_add(self,url):
		self.visited.add(url)

	def visited_contain(self,url):
		return url in self.visited

	def visited_len(self):
		return len(self.visited)

	def visited_save(self):
		# bloom filter serialization is binary data -> binary mode
		f=open(VISITEDURLDB,"wb")
		self.visited.tofile(f)
		f.close()

	def visited_load(self):
		f=open(VISITEDURLDB,"rb")
		self.visited=self.visited.fromfile(f)
		f.close()


class Crawler(object):
	"""Fetches a URL, follows its redirect chain manually, extracts links."""
	def __init__(self):
		self.timeout=10          # per-request timeout in seconds
		self.max_redirects=10    # cap on manual redirect hops (guards against loops)
		# content-type prefixes whose bodies are not stored (binary/media)
		self.type_filter=['application/','audio/','image/','video/']

	def get_header(self,url):
		"""Fetch *url* with auto-redirects off and walk the redirect chain by hand.

		Returns (ok, history, response): *history* maps hop number to
		"url#status"; *ok* is False on an error status or too many hops.
		Cookies set by any hop are re-sent for the rest of the chain.
		The original looped forever on cyclic 30x redirects; hops are now
		capped at self.max_redirects.
		"""
		r=requests.get(url,allow_redirects=False,stream=True,headers=header,timeout=self.timeout)
		code=r.status_code
		ck_flag=False
		run_flag=True
		cookies=''
		count=1
		history={}
		history[count]='%s#%s'%(url,code)
		while code != requests.codes.ok:
			try:
				r.raise_for_status()
			except:
				# 4xx/5xx (or non-redirect non-200): record failure and stop
				run_flag=False
				print("%s status code is %s"%(url,code))
				break
			if count>self.max_redirects:
				# redirect chain too long -- almost certainly a loop; bail out
				run_flag=False
				break
			nurl=r.headers['location']
			o = urlparse(nurl)
			if not o.scheme and not o.netloc:
				# relative Location header -> resolve against the current url
				nurl=urljoin(url,nurl)
			if r.cookies or ck_flag:
				# capture cookies from the first response that sets them,
				# then keep sending that same jar for every later hop
				if not ck_flag:
					cookies=r.cookies
					ck_flag=True
				r=requests.get(nurl,allow_redirects=False,stream=True,headers=header,cookies=cookies,timeout=self.timeout)
			else:
				r=requests.get(nurl,allow_redirects=False,stream=True,headers=header,timeout=self.timeout)
			code=r.status_code
			count+=1
			history[count]='%s#%s'%(nurl,code)
		return run_flag,history,r

	def get_content(self,url):
		"""Download *url*.

		Returns (content, history, headers).  content is None when the
		fetch failed, the body exceeds MAXLEN, or the content-type is in
		type_filter; on request errors history is the error string and
		headers is None.
		"""
		try:
			f,h,r=self.get_header(url)
		except requests.exceptions.SSLError as e:
			print(e.args)
			return None,str(e.args),None
		except requests.exceptions.Timeout as e:
			print(e.args)
			return None,str(e.args),None
		except requests.exceptions.ConnectionError as e:
			print(e.args)
			return None,str(e.args),None
		except:
			print("%s is invaild url"%(url))
			return None,"invaild url",None
		if not f:
			# redirect walk reported failure; still return what we saw
			return None,h,r.headers
		# renamed local: the original called this `header`, shadowing the
		# module-level request-header dict
		resp_headers=r.headers
		try:
			if int(resp_headers['content-length']) >=MAXLEN:
				# body too large to store
				return None,h,r.headers
			for ft in self.type_filter:
				if resp_headers['content-type'].find(ft)!=-1:
					# binary/media payload -- skip the body
					return None,h,r.headers
		except:
			# missing/malformed length or type header: fall through and keep the body
			print("no content-length or content-type field in %s"%url)
		return r.content,h,r.headers

	def get_urls(self,content):
		"""Return the de-duplicated list of http(s)/www links found in *content*."""
		url_finders=[
		re.compile(r'http[s]?://[^\s<>"]+|www\.[^\s<>"]+')
		]
		urls=[]
		for finder in url_finders:
			urls+=finder.findall(content)
		urls=list(set(urls))
		return urls

	def run_crawl(self,url):
		"""Crawl one url.

		Returns {'output': flat record dict for DB storage,
		         'urls': extracted link list, or EMPTY when no body}.
		"""
		res_dict={}
		output={}
		c,h,rh=self.get_content(url)
		output['url']=url
		output['time']=time.strftime('%Y-%m-%d %H:%M:%S')
		if h:
			if isinstance(h,dict):
				# flatten the redirect history into "hop#url#status," pairs
				t=""
				for k in h:
					t=t+str(k)+"#"+h[k]+","
				output['redpath']=t
			if isinstance(h,str):
				output['redpath']=h
		else:
			output['redpath']=EMPTY
		if rh:
			output['header']=str(rh)
		else:
			output['header']=EMPTY
		if c:
			output['content']=c
			res_dict['urls']=self.get_urls(c)
		else:
			output['content']=EMPTY
			res_dict['urls']=EMPTY
		res_dict['output']=output
		return res_dict



def go():
	"""Entry point: start workers, seed the queue with IPs whose port 80
	is open, then hand control to the manager's dispatch loop."""
	mgr=Manager()
	for n in range(0,1):
		mgr.add_worker(worker,n)
	# one IP per line in the seed file
	for line in open("IPTESTSM"):
		ip=line.strip("\n")
		if 80 in open_port(ip, 80):
			url="%s%s"%("http://",ip)
			print(url)
			mgr.inq.put(url)
	mgr.run_manger()
	# NOTE: run_manger loops forever, so this join is not reached
	mgr.wait()

def test():
	"""Manual smoke test: crawl one hard-coded host and show its redirect path."""
	crawler=Crawler()
	flag,history,resp=crawler.get_content("http://107.23.220.9")
	print(history)
# Script entry point: run the full crawler (swap the comments to run the
# single-url smoke test instead).
if __name__ == '__main__':
	#test()
	go()



