import sys
reload(sys)
sys.setdefaultencoding('utf8')

import  urllib2
from urllib2 import Request
from urllib2 import HTTPError
from urllib2 import URLError
import gevent
from gevent import queue
from HTMLParser import HTMLParser
from gevent import pool
from gevent.pool import Group
import time
import os
from pandas import Series
from pandas import DataFrame
from settings import headers
from settings import settings

class FileCore(object):
	"""Persist crawl results to disk under a configured directory."""

	def __init__(self, settings):
		# NOTE(review): expects a mapping with a 'paht' key -- looks like a
		# typo for 'path'; confirm against the settings module before renaming.
		self.settings = settings

	def save(self, name, data):
		"""Write *data* (anything exposing .to_csv) as a CSV file named *name*."""
		target = os.path.join(self.settings['paht'], name)
		data.to_csv(target)

class MulCore(object):
	"""Shared gevent pool plus a decorator that runs functions as greenlets."""

	# Pool shared by every decorated function in the process.
	cores = pool.Pool(40)
	Loaded = False

	@staticmethod
	def mul(func):
		"""Decorate *func* so each call is spawned as a greenlet in the pool."""
		def spawned(*args, **kwargs):
			greenlet = gevent.spawn(func, *args, **kwargs)
			MulCore.cores.add(greenlet)
			# Back-pressure: once the pool is saturated, block until all
			# current greenlets finish before accepting more work.
			if MulCore.cores.full():
				MulCore.cores.join()
		return spawned

	@staticmethod
	def run():
		"""Block until every greenlet in the shared pool has finished."""
		MulCore.cores.join()

class GetCore(object):
	"""Build urllib2 requests from (url, headers) pairs and fetch them
	with basic error handling."""

	def __init__(self, arguments):
		# arguments is a (url, headers) pair.
		self.url, self.headers = arguments

	def create_request(self):
		"""Return a urllib2.Request for this url/headers pair."""
		return Request(self.url, headers=self.headers)

	@classmethod
	def read_http(cls, request, after_call=None):
		"""Open *request* with a 5-second timeout.

		Returns after_call(response) when a callback is given, otherwise the
		raw response; returns None when the fetch fails with a handled error.
		"""
		try:
			respond = urllib2.urlopen(request, timeout=5)
			if after_call:
				return after_call(respond)
			return respond
		except HTTPError as e:
			print(e)
		except ZeroDivisionError:
			# NOTE(review): unclear why this is caught here -- presumably a
			# guard against a divide-by-zero inside an after_call callback.
			print("no targets")
		except UnicodeDecodeError as e:
			# Bug fix: urllib2.Request has no get_link() method -- the
			# original raised AttributeError here; get_full_url() is the
			# documented accessor for the request URL.
			UrlCore.error_links += [request.get_full_url()]
			print(e)

	@classmethod
	def get(cls, url_and_headers):
		"""Convenience: build an instance and return its Request."""
		g = cls(url_and_headers)
		return g.create_request()

class UrlCore(object):
	targets = DataFrame()
	tasks = queue.Queue(maxsize=10000)
	tasks_urls = set()
	error_links = []
	got_links= []

	def __init__(self,headers,start_url):
		self.headers= headers
		UrlCore.tasks.put(start_url)
	

	@MulCore.mul
	def spawn(self,id,headers=None):
		print "running ",id
		gevent.sleep(0)
		UrlCore.statics()
		count = 0
		try:


			start_url = start_url = UrlCore.tasks.get()	
			got_link =  UrlCore.get_all_link()
			if start_url in got_link:
				return
			start = time.time()
			print "{}".format(time.asctime())
			print start_url
			if not headers:
				headers = self.headers		
			request = GetCore.get([start_url,headers])
			respond = GetCore.read_http(request)
			if respond:
				links,txt = WebParaser.get_links(respond)

				print "getting {}".format(start_url)
				self.save_respond(request,txt)
				print "used : {} , got : {} ,from : {}".format( time.time()-start,len(UrlCore.get_all_link()),id)
				if UrlCore.tasks.qsize() < 9000:
					[UrlCore.tasks.put(link) for link in links if UrlCore.check_url(link)]

				if UrlCore.tasks.full():
					gevent.sleep(3)
					[UrlCore.tasks.put(link) for link in links if UrlCore.check_url(link)]

					
				

				
			else:
				pass
			count+=1

		except HTTPError,e:
			print e
		except URLError ,e:
			print e
		except AttributeError:
			pass
		else:
			pass

	def to_series(self,data):
		s  =Series(data)
		s.index = data.keys

	@staticmethod
	def check_url(url):
		old = len(UrlCore.tasks_urls)
		UrlCore.tasks_urls.add(url)
		new_old = len(UrlCore.tasks_urls)
		if new_old == old:
			return False
		return True
	@staticmethod
	def statics():
		print "cores : {} , tasks : {}".format(len(MulCore.cores),UrlCore.tasks.qsize())

	@MulCore.mul
	def bak_mul(self):
		self.mul_spawn()
		while 1:
			UrlCore.statics()
			if MulCore.cores.free_count()> 7:
				print "no work to do"
				if  UrlCore.tasks.empty():
					print "empty link"
					gevent.sleep(2)					
					series = Series.from_csv("links_csv.link")
					keys = series.keys()
					while len(series)!=0:
						this_keys = [keys.pop() for i in xrange(25)]
						links = [series.pop(k) for k in this_keys]
						[UrlCore.tasks.put(i) for i in links]
						print "load new links"
						gevent.sleep(0.01)

					gevent.sleep(1)
					continue

				last = MulCore.cores.size - MulCore.cores.free_count()
				print "add work" 
				
				[self.spawn(i) for i in xrange(7) ]
				new_last = MulCore.cores.size - MulCore.cores.free_count()
				print "add {} worker ok".format(new_last-last)
				gevent.sleep(2)
			elif UrlCore.tasks.full():
				
				_arr =  [UrlCore.tasks.get() for i in xrange(50)]
				series = Series(_arr)
				series.to_csv("links_csv.link")

				print "save links to file"
				gevent.sleep(20)
			else:

				gevent.sleep(5)
		
	@MulCore.mul
	def mul_spawn(self):
		
		try:
			while 1:
				if  UrlCore.tasks.empty():
					print "empty link"
					gevent.sleep(1)
					continue
				last = MulCore.cores.size - MulCore.cores.free_count()


				links = UrlCore.get_all_link()

				[self.spawn(id) for id in xrange(5) ]
				new_last = MulCore.cores.size - MulCore.cores.free_count()
				print "loading : {} worker  ok".format(new_last-last)
				print links

				print "end one core"
		except KeyError,e:
			print e
	def save_respond(self,request,txt):
		if len(UrlCore.targets) > 10:
			for key in UrlCore.targets:
				UrlCore.targets[key].to_csv(key)
			UrlCore.targets.to_csv("data.csv")
		try:
			UrlCore.targets[request.get_host()][request.get_full_url()] = txt 
		except KeyError:
			UrlCore.targets[request.get_host()] = Series()
			UrlCore.targets[request.get_host()][request.get_full_url()] = txt 


	@staticmethod
	def get_all_link():
		links = []
		for key in UrlCore.targets.keys():
			links += list(UrlCore.targets[key].keys())
		return links



	def __getitem__(self,*keys):
		print keys
		if len(keys)==2:
			return self.targets[keys[0][0]][keys[0][1]]
		else:
			return self.targets[keys[0][0]]

	def keys(self,subkey=None):
		if subkey:
			return [self.targets[key].keys() for key in  self.targets ]
		return [key for key in self.targets]


class WebParaser(HTMLParser):
	"""HTML parser that collects absolute links from <a href=...> tags,
	resolving relative hrefs against a base host URL."""

	def __init__(self,host):
		HTMLParser.__init__(self)
		# Absolute links collected while feeding HTML.
		self.links = []
		# Base URL used to absolutise relative hrefs.
		self.host = host
		# NOTE(review): never read or written elsewhere in this class.
		self.if_para = False

	def handle_starttag(self,tag,attrs):
		# Only anchor tags contribute links; anchors without an href
		# attribute are silently skipped.
		if tag == "a":
			attrs = dict(attrs)
			try:
				href = attrs['href']
				self._add_link(href)
			except KeyError:

				pass

	def _add_link(self,href):
		# Relative path containing "/": join it onto the base host.
		if not "http"  in href and "/" in href:
			link = os.path.join(self.host,href)

			# os.path.join drops the host entirely when href starts with
			# "/", so the result may still lack a scheme; retry with the
			# leading character stripped so the next join is relative.
			if not "http" in link :
				self._add_link(link[1:])
			else :
				self.links += [link]

		# In-page anchors and javascript pseudo-links are ignored.
		elif href.startswith("#") or href.startswith('javascript:'):
			pass
		elif "http" in href :
			self.links += [href]
		else:
			pass

	@classmethod
	def get_links(cls,respond,charset="utf8"):
		"""Parse *respond* (a urllib2 response).

		Returns (unique links, body text), or None when the body cannot be
		encoded/decoded with *charset*.
		"""
		try:
			url = respond.geturl()

			paraser = cls(url)
			txt = respond.read()
			# NOTE(review): result is unused, but in Python 2 str.encode()
			# implicitly ASCII-decodes first -- so this line acts as an
			# encoding check that raises UnicodeDecodeError for non-ASCII
			# bodies, caught below. Confirm before removing.
			tet = txt.encode(charset)
			paraser.feed(txt)
			return (list(set(paraser.links)),txt)
		except UnicodeDecodeError ,e:
			print e




				
