"""
This is the code of the PosNET peer-to-peer daemon
  Message are handled here  

"""
from __future__ import generators
#--- standard modules
import os,sys,sets,random
import time
from pprint import pprint
from StringIO import StringIO
#--- modules for multi-threading
try: import thread
except ImportError: import dummy_thread as thread
import threading,thread
from time import sleep,clock
import random
#---- modules for debug/log
from traceback import print_exc
import logging
#---- modules for graph storage
#from networkx.digraph import DiGraph
#----- modules for posnet implementation
from posnet_semantic_graph import *
from posnet_overlay_messages import *
import posnet_overlay_node_simple_queue 
import posnet_tcp_client

import threading
# Map each directed link-type tag to the tag seen from the other endpoint:
# "c2p" <-> "p2c", and "p2p" is its own symmetric counterpart.
Relations_sym = {"c2p":"p2c", "p2c":"c2p","p2p":"p2p"}

class Counter:
	"""Thread-safe monotonically advancing counter.

	Each call returns the next value in the sequence; access is serialized
	with an RLock so concurrent callers never observe the same value.
	"""
	def __init__(self, start=0, increment=1):
		self.counter = start        # last value handed out
		self.increment = increment  # step added on each call
		self.lock = threading.RLock()
	def __call__(self):
		with self.lock:
			self.counter += self.increment
			return self.counter

class posnet_overlay_node(object):
	""" This is an overlay node in posnet
	TODO: documentation describing instanciation. Also provide examples...?
	"""
#---------------------------------
#  configure the object
#---------------------------------
	def __init__(self,Poset,global_id,queue_instancier = None):
		""" constructor => initialize a overlay node in posnet overlay
		    @param Poset: the Poset object representing possible attributes
		    @param global_id: id of this node (may be necessary and sufficient to contact it).
		                      It may be its network id or something similar...
		                      NOTE(review): the logger name below does "%s:%d" % self.id(),
		                      which requires a 2-tuple such as (address, port) -- confirm callers.
		    @param queue_instancier: optional factory for the job queue; called with the
		                             job-execution callback, defaults to posnet_overlay_node_simple_queue
		"""
		object.__init__(self)
		#  the id of this node: this may be its address or something like that
		self._global_id = global_id # the id of this node
		# instanciate local semantic graph
		self._semantic_graph = posnet_local_semantic_graph(Poset)
		# lock are stored per semantic nodes
		self._nodes_to_state = dict() # vertex_id -> state tuple (state code in slot 0)
		self._nodes_to_lock = thread.allocate_lock() #TODO  single lock guarding _nodes_to_state
		# handles are stored per semantic node
		self._nodes_to_handles = dict()
		#----------------------------
		# queue of jobs
		if queue_instancier is None: queue_instancier =  posnet_overlay_node_simple_queue.posnet_overlay_node_simple_queue
		self._myqueue = queue_instancier(self._execute_job)
		#--------------------------
		# Overlay topology
		# tag external semantic nodes
		self._join_pending_nodes = sets.Set() # list of nodes that request a join that failed
		self._external = dict() # vertex_id -> id of the overlay node owning that (external) semantic node
		self._overlay_out_edges = dict() # keep track of links with other nodes of the overlay and semantic nodes associated with those links
		self._overlay_in_edges = dict() # keep track of links with other nodes of the overlay and semantic nodes associated with those links
		self._underlay = None # the underlay object -> it is used by overlay nodes to send messages
		# results : usable for collecting query results during local execution
		self.results = dict() # keys are query_id, values and results
		self.results_wait = dict() # keys are query_id, values are waiting info (as in rcv_answer in posnet_tcp_client)
		self.results_signal = dict() # keys are query_id, values are Event object 
		#-----------------
		# configure handlers for messages
		self._message_dispatcher_to_data = dict() # from a message type to a method name that is called with the data as parameter
		self._configure_message_dispatcher()
		#
		self.query_id = Counter() # thread-safe counter used to derive fresh query ids
		#---
		self.logger = logging.getLogger("%s:%d"%self.id()) #'posnet.ovnode')

	def _configure_message_dispatcher(self):
		"""Build the table mapping each posnet message type to the name of the
		bound method that handles it (looked up and called from receive())."""
		handlers = [
			(posnet_message_type_ping, "receive_ping"),
			(posnet_message_type_tunnel_reply, "receive_tunnel_reply"),
			(posnet_message_type_tunnel, "receive_tunnel"),
			(posnet_message_type_draw, "receive_draw"),
			(posnet_message_type_graph_request, "receive_graph_request"),
			(posnet_message_type_getparents_request, "receive_getparents_request"),
			(posnet_message_type_state_request, "receive_state_request"),
			(posnet_message_type_query, "receive_query"),
			(posnet_message_type_result, "receive_result"),
			(posnet_message_type_finish_send_results, "receive_finish_send_result"),
			(posnet_message_type_lock, "receive_lock"),
			(posnet_message_type_unlock, "receive_unlock"),
			# TODOOOO
			(posnet_message_type_lock_reply, "receive_lock_reply"),
			(posnet_message_type_join, "receive_join"),
			(posnet_message_type_join_reply, "receive_join_reply"),
			(posnet_message_type_split, "receive_split"),
			(posnet_message_type_better_split, "receive_better_split"),
			(posnet_message_type_initialize, "receive_initialize"),
			(posnet_message_type_overlay_add_handle, "receive_overlay_add_handle"),
			(posnet_message_type_update_topology, "receive_update_topology"),
			(posnet_message_type_add_handle, "receive_add_handle"),
			(posnet_message_type_close_link, "receive_close_link"),
			(posnet_message_type_open_link, "receive_open_link"),
		]
		self._message_dispatcher_to_data = dict(handlers)
		# in 2009
		# TODO : configure sessions between nodes

	
	def __str__(self):
		""" convert this object to a lightweight string
		    @return: a string represenation of the object
		"""
		return "["+str(self.id_str())+"]"
	def set_debug(self,value):
		""" enable or disable debugging mode
		    when enabled, the node becomes verbose on the console
		"""
		if value: self.logger.setLevel(logging.DEBUG)
		else: self.logger.setLevel(logging.INFO)
		logging.disable(logging.INFO)
		logging.disable(logging.DEBUG)
	
#---------------------------------------------	
#  communications between overlay nodes
#  + Id of the node and the underlay
#----------------------------------------------
	def id(self):
		""" @return : the id of this node.
			      The id may be its address or something like that; it depends on what has been specified in constructor
		"""
		return self._global_id

	def id_str(self):
		""" @return : the id of this node with well formatting
			      The id may be its address or something like that; it depends on what has been specified in constructor
		"""
		if type( self._global_id ) in [ type( []), type( (1,2) )]:
			return "__".join([str(x).replace(".","_") for x in self._global_id])
		else: return str(self._global_id)
	def get_poset(self):
		""" Get the poset of this node 
			@return: poset of this node
		"""
		return self._semantic_graph.poset()

#----------------------------------------------
# communication: send, receive (using underlay object) 
#----------------------------------------------


	def set_underlay(self,underlay):
		""" set the underlay object that is used to send messages between nodes
		    @param underlay: an object			
		"""
		self._underlay = underlay
		self._underlay.create_loopback_tunnel()

	def send(self,overlay_message,answer_type = None):
		""" send a message accross the overlay or to the node itself
		    @param overlay_message: the message object to be send
		    @param answer_type: the answer_type for the message (optional)
		"""
		if overlay_message.target == self.id(): return self.receive(overlay_message)
		else:
			if self._underlay is None: raise Exception("no underlay defined!")
			return self._underlay.propagate_message(overlay_message,answer_type,self.id())

	def receive(self,overlay_message,client_socket = None):
		""" a message is being received.
		    this method decides for which action to do depending on the message_type

		    @param overlay_message: the posnet_overlay_message	
		    @param client_socket: optional parameter specifing a socket for a possible reply
		    @return: whatever the dispatched handler returns
		    @raise Exception: when the message type has no registered handler;
		                      handler exceptions are logged with traceback and re-raised
		"""
		message_type = overlay_message.message_type
		# debug time
		t_start = clock()
		# check message
		if not self._message_dispatcher_to_data.has_key(message_type):
			self.logger.error("receive: unrecognized overlay message type="+str(message_type))
			raise Exception("receive: unrecognized overlay message type="+str(message_type))
		# dispatch to right handler function depending on the message_type
		function_name = self._message_dispatcher_to_data[message_type]
		#self.logger.info("receive -> %s" % str(function_name))	
		try:
			function = getattr(self,function_name)
			# call handler with the arguments
			additional_print = ""
			is_query = False
			is_parents = False
			if function_name == "receive_query":
				is_query = True
				additional_print += "\"%s\" "%str(overlay_message.data.query_type)
				if overlay_message.data.query_type ==  "parents (Point query)": is_parents = True
			try:
				#self.logger.info("%s %s[BEGIN]"  % (str(function_name),str(additional_print), ))
				return function(overlay_message.source, overlay_message.data, client_socket, overlay_message.answer_type)
			finally:
				t_end = clock()
				duration = t_end - t_start
				# NOTE(review): 'True or ...' makes this condition always true, so every
				# handler call is logged; this looks like a leftover debug toggle -- confirm
				if  True or (is_query or duration > 0.0 or True) and not is_parents:
					self.logger.info("%s %s[%f s.]"  % (str(function_name),additional_print,duration))
			#------------
#			import cProfile, pstats
#			def real_main():
#				return function(overlay_message.source, overlay_message.data, client_socket, overlay_message.answer_type)
#			prof = cProfile.Profile()
#			prof = prof.runctx("real_main()", globals(), locals())
#			print "<pre>"
#			stats = pstats.Stats(prof)
#			stats.sort_stats("time")  # Or cumulative
#			stats.print_stats(200)  # 80 = how many to print
#			# The rest is optional.
#			#stats.print_callees()
#			#stats.print_callers()
#			print "</pre>"
#			sys.stdout.flush()
	
		except Exception,e:
			# log and dump the traceback, then re-raise so the caller still sees the failure
			self.logger.error("receive("+str(message_type)+"): error_message="+str(e))
			from traceback import print_exc
			print_exc()
			sys.stderr.flush()
			raise

	def _reply_message(self,m,client_socket = None,answer_type = None,query_id = None,send_end_or_close= None):
		""" reply an overlay message
		    @param m: message
		    @param client_socket: optional parameter specifying a socket when answer_type is 0/ or a peer addr if answer_type > 0
		    @param answer_type: optional parameter specifying a type for answer (default answer is in the socket
		    @param query_id: optional parameter specifying a query_id -> this parameter my not always make sense!
		"""
		#DEBUG
		replay_msg_str = "_reply_message:"
		if send_end_or_close is None: send_end_or_close = True
		if query_id is not None: replay_msg_str = "query #"+str( query_id)+": reply_message"
		#----
		
		if answer_type == 0:
			if client_socket is not None:
				try:
					if m is not None:
						posnet_tcp_client.send_posnet_message_over_tcp_socket(m,client_socket,True)
					if send_end_or_close: client_socket.close()
				except:
					self.logger.error("during _reply_message [inside socket]: %s"%str(e))
					raise
		else:
			add,port = client_socket
			if m is not None:
				posnet_tcp_client.send_answer_message(add,port,m)
			if send_end_or_close:
				posnet_tcp_client.send_answer_end(add,port,self.id(),query_id)
		# when this fuction is called (network is not used),
		# return the message?
		return m


#----------------------------------------------------------------------
# DEBUG PURPOSES: ping, graph, state, draw...
#----------------------------------------------------------------------	
	def receive_ping(self, message_source, message_data, client_socket, answer_type):
		"""Answer a ping with a ping-reply carrying this node's id and poset."""
		reply = posnet_ping_reply_message(self.id(), message_source, self._semantic_graph.poset())
		self._reply_message(reply, client_socket, answer_type)

	def receive_graph_request(self, message_source, message_data, client_socket, answer_type):
		"""Answer a graph request with this node's vertex-id table and edge list."""
		payload = {
			"vertex_id": self._semantic_graph.get_vertex_id_dict(),
			"data": list(self._semantic_graph.iter_edges()),
		}
		reply = posnet_graph_result_message(self.id(), message_source, payload)
		self._reply_message(reply, client_socket, answer_type)

	def receive_getparents_request(self,message_source, message_data,client_socket,answer_type):
		""" handle reception of a get-parents request
		    For each space point in message_data, reply the list of
		    (state, parent_space_point, node_id) triples of its parents.
		"""
		replied_data = []
		for space_point in message_data:
			space_point_copy = self._semantic_graph.hashable_space_point(space_point)
			vid = self._semantic_graph.vertex_id(space_point_copy)
			tab = []
			self._nodes_to_lock.acquire()
			try:
				# bug fix: the original released the lock only on the success
				# path, leaking it (and deadlocking later callers) whenever a
				# parent lookup raised inside the loop
				for vertex_id in self._semantic_graph.iter_parents(vid):
					state_data = self._nodes_to_state[vertex_id]
					po = (state_data[0],self._semantic_graph.space_point(vertex_id),self.id())
					tab.append(po)
			finally:
				self._nodes_to_lock.release()
			replied_data.append( tab )
		m  = posnet_getparents_result_message( self.id(), message_source,replied_data)
		self._reply_message(m,client_socket,answer_type)

	def receive_draw(self,message_source, message_data,client_socket,answer_type):
		""" handle reception of a draw query
		    Renders this node's graph as a picture (via write_dot) and replies it
		    together with neighbor and raw-graph information.
		"""
		# bug fix: the original called cStringIO.StringIO(), but the file only
		# imports StringIO (from StringIO import StringIO), so this raised
		# NameError on every draw request
		out = StringIO()
		ext = self.write_dot(out,message_data) # message data may be a boolean that means external or not
		picture = out.getvalue()
		out.close()
		replied_data = dict()
		replied_data["picture"] = picture
		replied_data["neighbors"] = self.get_neighbors()
		replied_data["ext"] = ext
		replied_data["graph"] = self.get_raw_graph() 
		m  = posnet_picture_message( self.id(), message_source,replied_data)
		self._reply_message(m,client_socket,answer_type)
	
	def _send_state_request(self, overlay_node_destination_id, space_points):
		"""Send a state request for the given space points to a remote node and return its reply.

		(sic: 'resquest' is the name used by the messages module -- presumably a
		historical typo there, kept for compatibility)
		"""
		request = posnet_state_resquest_message(self.id(), overlay_node_destination_id, space_points)
		request.answer_type = 0
		return self.send(request, 0)

	def send_state_request(self, overlay_node_destination_id, space_points):
		"""Query semantic-node states, short-circuiting to a local lookup when
		the destination is this node itself."""
		if overlay_node_destination_id != self.id():
			return self._send_state_request(overlay_node_destination_id, space_points)
		return self._receive_state_request(space_points)

	def _receive_state_request(self,message_data):
		replied_data = dict()
		#------------
		vertex_id_dict  = self._semantic_graph.get_vertex_id_dict()
		if message_data is None or type(message_data) != type( [] ):
			for space_point,vertex_id in vertex_id_dict.items():
				######self.logger.info("get state of vertex_id="+str(vertex_id)+", space_point="+str(space_point))
				try:
					self._nodes_to_lock.acquire() # NEW
					state_data = self._nodes_to_state[vertex_id]
					replied_data[space_point] = (state_data[0],state_data[1])
				finally:
					self._nodes_to_lock.release()
		else:
			for space_point in message_data:
				try:
					vertex_id = vertex_id_dict[space_point]
					try:
						self._nodes_to_lock.acquire() # NEW
						state_data = self._nodes_to_state[vertex_id]
						replied_data[space_point] = (state_data[0],state_data[1])
					finally:
						self._nodes_to_lock.release()
				except Exception,e:
					self.logger.error("unable to find point %s while state_request"%space_point) 
		return replied_data
		#------------
	
	def receive_state_request(self, message_source, message_data, client_socket, answer_type):
		"""Answer a state_request by replying the states of the requested local nodes."""
		states = self._receive_state_request(message_data)
		reply = posnet_state_result_message(self.id(), message_source, states)
		self._reply_message(reply, client_socket, answer_type)
		

#------------------------
#  query processing
#  + multi-threading : job/queue management
#------------------------
	def _enqueue_query(self,job):
		""" this add a query to the queue
		    @param job: the (tab_sources_targets, query, requester) tuple built by _query_to_job;
		                it will eventually be passed to _execute_job by the queue
		"""
		self._myqueue.enqueue(job)
		
	def _process_queue(self):
		""" this explicitely process the queue.
		    this method does nothing when job are automatically executed whithin the queue
		    this method does something when the queue is default (i.e. a   posnet_overlay_node_simple_queue.posnet_overlay_node_simple_queue )
		"""
		self._myqueue.process_queue()
		
	def _execute_job(self,job):
		""" this method simply executes a job : this method is called  within queue internal processes 
		    @param job: (tab_sources_targets, query, requester) tuple as built by _query_to_job

		    Runs the query on the local semantic graph, answers results (and
		    propagation announcements) to the requester, then forwards the query
		    to the next overlay nodes. All exceptions are caught and logged so
		    the queue keeps running.
		"""
		try:
			t_start = time.clock()
			tab_sources_targets,query,requester = job
			# propagation info grouped per next overlay node
			by_overlay_node_propagation = dict()
			by_overlay_node_query_id = dict()
			# Requester
			# a requester carried inside the query overrides the one from the job
			if query.requester is not None and query.requester[0] is not None:
					requester = query.requester
			# Result for requester
			result_msg = query.result()
			debug_hdr_msg = "query #"+str(query.id)+": "
			# iterate over multiple queries...
			#self.logger.info("query %s - %d jobs (requester=%s)"%(str(query.id),len(tab_sources_targets),str(requester)))
			#---------
			job_result = posnet_process_jobs(self._semantic_graph,self._external,tab_sources_targets,query)
			results,propagation,n_iterations_tot = job_result
			#---------
			# give an answer to the requester (results + propagation info)
			if requester is not None:
				# record results
				for vertex_id,result in results:
					state = -1
					try:
						self._nodes_to_lock.acquire() # NEW
						state = self._nodes_to_state[vertex_id][0]
					finally:
						self._nodes_to_lock.release() # NEW
					result_msg.add_result(result,state)
			# record propagation next nodes
			# each 'next' is a path whose last element is an external vertex;
			# _external maps it to the overlay node owning it
			for next in propagation:
				next_overlay_node  = self._external[next[-1]]
				next_space_point = self._semantic_graph.space_point(next[-1])
				last_space_point = self._semantic_graph.space_point(next[-2])
				if not by_overlay_node_propagation.has_key(next_overlay_node):
					# one fresh query id per next overlay node
					query_id_propag = self.new_query_number(next_overlay_node)
					by_overlay_node_query_id[next_overlay_node]=query_id_propag
					by_overlay_node_propagation[next_overlay_node] = []
				by_overlay_node_propagation[next_overlay_node].append(  (next_space_point,last_space_point) )
			if requester is not None:
				# record propagation in answer
				for next_overlay_node, dat_next_last in by_overlay_node_propagation.items():
					query_id_propag = by_overlay_node_query_id[next_overlay_node]
					result_msg.add_next( (next_overlay_node,query_id_propag,dat_next_last) )
			t_end = time.clock()
			self.logger.info("query %s - %d jobs - %d iterations [ %s s.](requester=%s)"%(str(query.id),len(tab_sources_targets),n_iterations_tot,str(t_end-t_start),str(requester)))
			transfer_to_requester_ok = False
			try:
				if requester is not None:
					# answer results to requester
					result_msg.data["time"] = str(t_end-t_start)
					result_msg.data["iterations"] = n_iterations_tot 
					result_msg.data["jobs"] = len(tab_sources_targets)
					t_start = time.clock()
					message = posnet_result_message(self.id(),requester,result_msg)
					requester_add,requester_port = requester
					posnet_tcp_client.send_answer_message(requester_add,requester_port,message)
					posnet_tcp_client.send_answer_end(requester_add,requester_port,self.id(),query.id)
					t_end = time.clock()
					self.logger.info("query_answer %s [ %s s.](requester=%s)"%(str(query.id),str(t_end-t_start),str(requester)))
					transfer_to_requester_ok = True
			except Exception,e:
				self.logger.error("abort Query because answering data %s to requester %s failed: %s"%(str(query.id),str(requester),str(e)))
			#-------------------------------------------------------------------------------------------------------
			# only propagate further when the requester got its answer
			# (otherwise the whole query is aborted)
			if transfer_to_requester_ok:
					for next_overlay_node, set_of_source_dests in by_overlay_node_propagation.items():
						try:
							#TODO ? 
							myquery = query.copy()
							for next_space_point,last_space_point in set_of_source_dests:
								myquery.add(  next_space_point,last_space_point  )
							query_id_propag = by_overlay_node_query_id[next_overlay_node]
							myquery.id = query_id_propag
							# send query mechanism
							m = posnet_query_message(self.id(),next_overlay_node,myquery)
							self.logger.info("propagate query %s to %s"%(str(query_id_propag),str(next_overlay_node)) )
							self.send(m)
						except Exception,e:
							self.logger.error("sending query %s to node %s failed: %s"%(str(query.id),str(next_overlay_node),str(e)))
			#return job_result # TODO: remove?
		except Exception,e:
			self.logger.error("'_execute_job method': queue processing at node "+str(self)+":"+str(e))
			print_exc()
			sys.stderr.flush()

	def _process_job_iter(self,source_vertex_id,target_vertex_id,query,already_propagated = None): 
		""" process a query assumed to be at a semantic node.
		this generator may do the following things:
		- propagate the query to next semantic nodes
		- return results (as table of posnet_result_message)

		yields (code, data) pairs consumed by _process_job:
		- (0, destination_vertex_id): destination is local -> internal propagation
		- (1, destination_vertex_id): destination is external -> forward to another overlay node
		- (2, (space_point, state)): a matching result

		@param already_propagated: mutable set of (vertex_id, link_type) pairs already
		                           visited; used to cut cycles and duplicated work
		"""
		#--------------	
		allowed_to_execute = True
		list_type = type( (1,2) )
		link_type = None
		if True:
			try:
				# skip this hop entirely when (target, link_type) was already visited
				link_type = self._semantic_graph.get_link_type( source_vertex_id, target_vertex_id)
				alrs = len(already_propagated)
				already_propagated.add( ( target_vertex_id, link_type) )
				if alrs == len(already_propagated):
					allowed_to_execute = False
			except: pass
		#--------------
		if allowed_to_execute:
			tstart = time.clock()
			source_link_type = None
			if source_vertex_id is not None:
				# fall back to the symmetric lookup when the edge is stored in the other direction
				try: source_link_type = self._semantic_graph.get_link_type(source_vertex_id,target_vertex_id)
				except: source_link_type = self._semantic_graph.get_link_type_sym(target_vertex_id,source_vertex_id)
			for returned_info in self._semantic_graph.process_query(target_vertex_id,query.query_type,query.query,source_vertex_id,source_link_type):
				######self.logger.info("query #"+str(query.id)+": from semantic graph="+str(returned_info))
				if type(returned_info) == list_type:
					request_data_to_propagate,destination = returned_info
					# at this point, this is a propagation
					thelinktype = None
					try:
						thelinktype = self._semantic_graph.get_link_type(target_vertex_id,destination)
					except:
						thelinktype = self._semantic_graph.get_link_type_sym(destination,target_vertex_id)
					if (destination,thelinktype) not in already_propagated:
						# NOTE(review): the original comments here were swapped --
						# a destination absent from _external is LOCAL, so code 0
						# is internal propagation (see _process_job)
						if not self._external.has_key(destination):
							yield 0,destination # local destination -> internal propagation
						else:
							yield 1,destination # external destination -> propagate to another overlay node
				else:
					# this is a match, return result
					vertex_id = returned_info
					space_point = self._semantic_graph.space_point(vertex_id)
					vertex_state = None
					try:
						self._nodes_to_lock.acquire() # NEW
						vertex_state = self._nodes_to_state[vertex_id][0]
					finally:
						self._nodes_to_lock.release() # NEW
					yield 2,(space_point,vertex_state)
			#self.logger.info("process_query %s"%str(time.clock()-tstart))
				
	def _process_job(self,source_vertex_id,target_vertex_id,query,already_propagated = None):
		""" run a query iteratively over the local semantic graph, starting at
		    the (source_vertex_id, target_vertex_id) hop.

		    @param already_propagated: set of (vertex_id, link_type) pairs already visited
		                               (a new set is created when None)
		    @return: (results, external_propagation, n_iterations) where
		             results is a set of (hashable_space_point, state) matches,
		             external_propagation is a set of (last_vertex_id, external_vertex_id)
		             hops to forward to other overlay nodes, and n_iterations counts
		             the internal hops processed
		"""
		#tstart = time.clock()
		external_propagation = sets.Set() # []
		internal_propagation = sets.Set() # []
		if already_propagated is None: already_propagated = sets.Set()
		results = sets.Set() # []
		internal_propagation.add(  (source_vertex_id,target_vertex_id)  )
		#------------
		# worklist loop: pop a pending local hop, expand it, and collect
		# new hops / results until nothing local remains
		n_iterations = 0
		while len(internal_propagation) > 0:
			source_vertex_id,target_vertex_id = internal_propagation.pop()
			n_iterations += 1
			for code,dat in self._process_job_iter(source_vertex_id,target_vertex_id,query,already_propagated):
				######self.logger.info("_process_job: iter on code="+str(code)+", dat="+str(dat))
				if code == 0: # internal propagation
					_destination = dat 
					internal_propagation.add( (target_vertex_id,_destination ) )
				elif code == 1: # external propagation
					_destination = dat
					external_propagation.add( (target_vertex_id,_destination ) )
				elif code == 2: # result
					results.add( (self._semantic_graph.hashable_space_point(dat[0]),dat[1]) )
		#####self.logger.info("query #"+str(query.id)+": end "+dbg+" "+str(query.query_type)+"( "+str(query.query)+" )")
		#tend = time.clock()
		#self.logger.info("propag: %s"%str(tend-tstart))
		return (results,external_propagation,n_iterations)

	def _query_to_job(self,source_and_targets,query,requester  = None):
		""" answer a query. this method is called when a query is received at the overlay node.
		    this method enqueues the job to the queue for further treatment

		    @param source_and_targets: iterable of (space_point_source, space_point_dest) pairs
		    @param query: the query object carried with the job
		    @param requester: optional (addr, port) to answer results to
		    @raise Exception: when a non-None source space point is unknown locally
		"""
		tab_sources_targets = []
		for space_point_source,space_point_dest in source_and_targets:
			#--- identify source node
			source_vertex_id = None
			try:
				if space_point_source is not None: source_vertex_id = self._semantic_graph.vertex_id(space_point_source)
			except Exception,e:
				# NOTE(review): the warning and the 'space_point_source = None'
				# assignment are dead code -- 'raise' re-raises immediately; confirm
				# whether the intent was to continue with a None source instead
				self.logger.warning("at "+str(self)+" unknown source node in query : %s" % e)
				space_point_source = None
				raise
			#--- identify target node
			target_vertex_id = self._semantic_graph.vertex_id(space_point_dest)
			# --
			tab_sources_targets.append(  (source_vertex_id,target_vertex_id)  )
		#---- create 'job'
		job = (tab_sources_targets,query,requester)
		#---- treat job
		######self.logger.info("_query_to_job: enqueue query "+str(space_point_source)+" -> "+str(space_point_dest))
		self._enqueue_query( job )
		self._process_queue() # TODO may be remove when queue processing will be autnomous

	def new_query_number(self,id):
		hash_str = str( hash(str(id)) + self.query_id() )
		return hash_str
		

	def receive_query(self,requester_id,query_message,client_socket = None,answer_type = None):
		""" this method is called when a query is received by this overlay node

		    Picks a random local entry point when the query has none, negotiates
		    who the requester is (socket peer vs. requester carried in the query),
		    then enqueues the job; the actual answer is produced by _execute_job.
		    @return: always None (answering happens asynchronously)
		"""
		# choose target semantic nodes if not found
		if len(query_message.source_and_targets) == 0:
		#if query_message.target is None:
			if len(self._semantic_graph) > 0:
				random_node = self._semantic_graph.random_node()
				# never enter through an external node: pick one of its local predecessors
				if self._external.has_key( random_node ):
					random_node = random.choice( self._semantic_graph.predecessors( random_node ) )	
				random_space_point = self._semantic_graph.space_point( random_node )
				#query_message.target = random_space_point
				query_message.add(random_space_point)
				#####self.logger.info("receive_query: generate target of query =" +str(random_space_point))
			else: #node is empty!!!!!
				# nothing to query: just close/end the answer channel
				#####self.logger.info("receive query: " +str(query_message.query_type)+" but no node to enter because i'm empty")
				self._reply_message(None,client_socket,1)
				return None
		if query_message.id is None: query_message.id = self.new_query_number(self.id())
		# determine requester (neighbor in socket or source indicated in the message)
		requester = None
		if client_socket is not None:
			if answer_type == 0: requester = client_socket.getpeername()
			else: requester = client_socket
		#else: requester =  query_message.source
		#TODO?
		#####self.logger.info("query #"+str(query_message.id)+" <-- requester="+str(query_message.requester))
		if query_message.requester is None or  query_message.requester[0] is None: query_message.requester = requester
		# query requester
		query_requester = query_message.requester
		if   query_message.requester != requester:
			# the real requester is elsewhere: release the neighbor's channel
			# and answer to the requester carried in the query instead
			if  requester is not None:
				#####self.logger.info("query #"+str(query_message.id)+": reply to neighbor - close its connection")
				self._reply_message(None,client_socket,answer_type)
			requester = query_requester
		elif client_socket is not None and answer_type == 0: client_socket.close()
		# treat query
		#####self.logger.info("query #"+str(query_message.id)+": enqueue job= "+query_message.source_and_targets_str(""))
		self._query_to_job(query_message.source_and_targets,query_message,requester)
		# answer is done when the job is really executed!
		return None
					

	def send_query(self, overlay_node_destination_id, query_content):
		"""Wrap query_content in a query message addressed to the given overlay
		node and send it.

		@param overlay_node_destination_id: destination of the query
		@param query_content: the content of the query
		@return: whatever send() returns
		"""
		query_msg = posnet_query_message(self.id(), overlay_node_destination_id, query_content)
		return self.send(query_msg)
	
	def receive_finish_send_result(self,answerer_id,result_message,client_socket = None,answer_type = None):
		""" this method is called when a finish_send_result is received by this overlay node

		    @param answerer_id: the peer that has finished sending results
		    @param result_message: carries the query_id whose results are complete
		    Removes the answerer from the wait-set of that query and, when no
		    peer is left to wait for, signals the Event so waiters wake up.
		"""
		if result_message is None: return	
		query_id = result_message
		#---------------------------
		towait = answerer_id
		# handle end
		try:
			if towait in self.results_wait[query_id]: 
				self.results_wait[query_id].remove(towait)
		except KeyError:
			pass
		try:
			# drop the empty wait-set so the check below sees the query as finished
			if len(self.results_wait[query_id]) == 0:
				del self.results_wait[query_id]
		except KeyError: pass
		if not self.results_wait.has_key(query_id):
			#### FINISH	
			try:
				#if not self.results_signal[query_id].isSet( ):
				self.logger.info("%s set result ok %f"%(str(query_id),time.clock()))
				self.results_signal[query_id].set()
			except KeyError: pass
	

	def receive_result(self,answerer_id,result_message,client_socket = None,answer_type = None):
		""" this method is called when a result is received by this overlay node

		    Records the result under (query_id, answerer_id) and marks both the
		    answerer and any 'next' nodes announced in the message as peers whose
		    end-of-results we still wait for (see receive_finish_send_result).
		"""
		if result_message is None: return	
		query_id = result_message.query_id
		# mark next nodes for waiting (creating the wait-set on first use;
		# setdefault replaces the original's triple try/except KeyError blocks
		# and drops the unused local 'thedico')
		wait_set = self.results_wait.setdefault(query_id, sets.Set())
		for waddr in posnet_tcp_client.recv_answers_iter_extract_next(result_message):
			wait_set.add(waddr)
		#---------------------------
		# handle result
		by_answerer = self.results.setdefault(query_id, dict())
		by_answerer.setdefault(answerer_id, sets.Set()).add(result_message)
		# the answerer itself must also be waited for
		wait_set.add(answerer_id)
		#if client_socket is not None:
		#	client_socket.close()
	
##	def result_messages_to_query_results(self,result_messages):
##		ret = []
##		for result_message in result_messages:	
##			results = result_message.data.results
##			states = result_message.data.states
##			query = result_message.data.query
##			algorithm = result_message.data.query_type
##			answerer_id = result_message.source
##			ret.append( (query,algorithm,results,answerer_id) )
##		return ret
	def result_messages_to_query_results2(self,result_messages):
		""" flatten result messages into (state, result, answerer_id) triples
		    @param result_messages: iterable of messages exposing .data.results,
		                            .data.states (parallel lists) and .source
		    @return: list of (state, result, answerer_id), one per individual result
		"""
		ret = []
		for result_message in result_messages:
			results = result_message.data.results
			states = result_message.data.states
			answerer_id = result_message.source
			# pair each result with its state by index (the lists are parallel);
			# enumerate replaces the xrange(len(...)) loop, and the unused
			# locals 'query'/'algorithm' are gone
			for result_i, result in enumerate(results):
				ret.append( (states[result_i], result, answerer_id) )
		return ret

	def result_messages_to_query_results3(self,result_messages_tab2):
		""" Flatten a {answerer_id: [result_message, ...]} mapping into
		(state, result, answerer_id) tuples.

		Unlike result_messages_to_query_results2, the messages here carry
		their payload directly (no .data indirection). On failure the
		offending input is dumped for debugging and the error re-raised.
		"""
		ret = []
		try:
			for answerer_id, result_messages in result_messages_tab2.items():
				for result_message in result_messages:
					results = result_message.results
					states = result_message.states
					for result_i in range(len(results)): # states!
						ret.append( (states[result_i], results[result_i], answerer_id) )
		except Exception:  # was a bare except: do not trap KeyboardInterrupt/SystemExit
			# dump the input that caused the failure, then propagate
			pprint(result_messages_tab2)
			sys.stdout.flush()
			raise
		return ret



#------------------------------------------------
# Add data to the overlay
#--------------------------------------------------
	def receive_overlay_add_handle(self,message_source,add_handle_message,client_socket = None,answer_type = None):
		""" Handle an 'add handle' request: insert the (space_point, handle)
		pair into the overlay, acknowledge the sender, then retry any
		pending join requests. """
		# the returned owner overlay node is not needed here
		self._overlay_add_handle(add_handle_message.space_point,add_handle_message.handle)
		self._reply_message(None,client_socket,answer_type)
		self._retry_pending_join()

	def _overlay_point_query_self(self,algorithm,space_point):
		""" Run a point query starting from this node itself.

		@param algorithm: name of the query algorithm
		                  (e.g. "all_parents (Point query)")
		@param space_point: the queried point (None for e.g. all_top)
		@return: list of (state, result, answerer_id) tuples; empty when the
		         local semantic graph is empty
		"""
		if len(self._semantic_graph) == 0: return []
		# TODO tunnel-based propagation: disabled for now
		# (the original guard was the always-false 'if False and True:')
		use_tunnel = False
		if use_tunnel:
			query = posnet_semantic_multiquery(self.id(),None,algorithm,space_point)
			# prepare receiving
			if query.id is None: query.id = self.new_query_number(self.id())
			self.results_signal[query.id] = threading.Event()
			#
			m = posnet_query_message(self.id(),self.id(),query)
			m.answer_type = self.id()[1]
			thread.start_new_thread(self._underlay.propagate_message,(m,m.answer_type))
			# wait for results (20 s timeout)
			self.results_signal[query.id].wait(20)
			# the module imports 'clock' from time (there is no 'import time'),
			# so the original 'time.clock()' raised NameError here
			self.logger.info("%s get result for query %f"%(str(query.id),clock()))
			query_results = dict()
			try: query_results = self.results[query.id]
			except KeyError: pass
			try: del self.results_signal[query.id]
			except KeyError: pass
			try:
				return self.result_messages_to_query_results3(query_results)
			finally:
				# always drop the stored results, even when flattening fails
				try: del self.results[query.id]
				except KeyError: pass
		else:
			query_results = posnet_tcp_client.posnet_point_query( self.id(), algorithm ,space_point,None,None,self.logger)
			return self.result_messages_to_query_results2(query_results)
	
	def _overlay_point_query(self,destination,algorithm,space_point,source = None,entring_node = None):
		""" Run a point query on a remote overlay node and flatten its answers
		into (state, result, answerer_id) tuples. """
		raw_answers = posnet_tcp_client.posnet_point_query(destination, algorithm, space_point, source, entring_node, self.logger)
		return self.result_messages_to_query_results2(raw_answers)
	def _overlay_all_parents(self,space_point):
		""" Point query: all parents of space_point in the overlay. """
		algorithm = "all_parents (Point query)"
		return self._overlay_point_query_self(algorithm, space_point)

	def _overlay_all_children(self,space_point):
		""" Point query: all children of space_point in the overlay. """
		algorithm = "all_children (Point query)"
		return self._overlay_point_query_self(algorithm, space_point)

	def _overlay_all_top(self):
		""" Point query: all top (core) vertices of the overlay. """
		algorithm = "all_top (Point query)"
		return self._overlay_point_query_self(algorithm, None)

	def _overlay_parents(self,space_point,over_node):
		""" Point query: direct parents of space_point, asked to over_node. """
		algorithm = "parents (Point query)"
		# the queried point also serves as the entering-node hint
		return self._overlay_point_query(over_node, algorithm, space_point, None, space_point)
	
	def _send_getparents(self,overlay_node_destination_id,space_points):
		""" Ask a remote overlay node for the parents of each given space
		point (direct TCP request, no overlay message). """
		return posnet_tcp_client.posnet_getparents_request(overlay_node_destination_id, space_points, 0)


	def _overlay_add_handle(self,space_point,handle):
		""" Insert (space_point, handle) into the overlay.

		@return: the owner overlay node, or None when the locked insertion
		         failed (failure is logged, not retried).
		"""
		owner = self._overlay_add_handle_with_lock(space_point,handle)
		if owner is None:
			self.logger.info("******** _overlay_add_handle: failed!")
		return owner
		# a retry loop with a random back-off used to live here; it is
		# intentionally disabled (single attempt only)
	
	def _overlay_add_handle_with_lock(self,space_point,handle):
		""" Try to insert (space_point, handle) into the overlay, under locks.

		Outline:
		  1. query the parents, children and top (core) vertices related to
		     space_point, recording (owner node, state) for every vertex seen;
		  2. if the point already exists somewhere, just add the handle there;
		  3. otherwise pick an owner node, compute the link additions and
		     removals needed to splice the new vertex in, grouped per peer;
		  4. lock every touched vertex on every peer (cancelling the locks
		     already taken and returning None on any failure);
		  5. send the topology updates, then unlock.

		@param space_point: the point (attribute dict) to insert
		@param handle: the handle to attach to the point
		@return: the overlay node that owns the point, or None when a vertex
		         changed/moved during the queries or locking failed
		"""
		#------------------------------------------------------
		poset_keys = self._semantic_graph.poset().keys() 
		poset_keys.sort()
		# lock data
		# states: hashable space point -> (owner overlay node, state); seeing
		# the same vertex twice with a different owner or state aborts the add
		states = dict()
		to_lock = dict() #keys= overlay_nodes, values = semantic node sets
		#------------------------------------------------------
		# find a place : get parents info
		parents = sets.Set()
		parents_overlay_nodes = dict()
		for parent in self._overlay_all_parents(space_point):
			#####self.logger.info("parent received: "+str(parent))
			# each answer tuple ends with (..., space_point_dict, owner_node)
			object_description = self._semantic_graph.hashable_space_point(parent[-2])
			object_overlay_node = parent[-1]
			parents.add( (object_description ,object_overlay_node )  )
			parents_overlay_nodes[ object_description ]  = object_overlay_node
			# lock data
			object_state = parent[0]
			if states.has_key(object_description):
				state_and_node = states[object_description]
				if state_and_node[0] !=  object_overlay_node:
					self.logger.error("vertex %s has been transfered to another node"%str(object_description))
					return None
				elif state_and_node[1] !=  object_state: 
					self.logger.error("vertex %s has changed during addition"%str(object_description))
					return None
			states[ object_description ]  = ( object_overlay_node , object_state )
			#---
		#------------------------------------------------------
		# if the point itself appears among its parents, it already exists:
		# only add the handle on its current owner and stop here
		space_point_copy = self._semantic_graph.hashable_space_point(space_point)
		found_in_parents = None
		for parent in parents:
			if parent[0] == space_point_copy:
				overlay_node_container = parent[1]
				#####self.logger.info("_overlay_add_handle: point "+str(space_point_copy)+" already_exist in "+str(overlay_node_container))
				# TODO lock before add handle?
				self._send_add_handle(overlay_node_container,space_point,handle)
				found_in_parents = overlay_node_container
				break
		# the semantic was already found
		if found_in_parents is not None: return found_in_parents
		#----------------------
		# find a place : get children info
		children = sets.Set()
		for child in self._overlay_all_children(space_point):
			object_description = self._semantic_graph.hashable_space_point(child[-2])
			object_overlay_node = child[-1]
			item = (object_description ,object_overlay_node)
			children.add( item )
			# lock data
			object_state = child[0]
			if states.has_key(object_description):
				state_and_node =  states[object_description]
				if state_and_node[0] !=  object_overlay_node:
					self.logger.error("vertex %s has been transfered to another node"%str(object_description))
					return None
				elif state_and_node[1] !=  object_state: 
					self.logger.error("vertex %s has changed during addition"%str(object_description))
					return None
			states[ object_description ]  = ( object_overlay_node , object_state )
			#----
		#####self.logger.info("_overlay_add_handle: found parents "+str(space_point_copy) +"="+str(parents))
		#####self.logger.info("_overlay_add_handle: found children "+str(space_point_copy) +"="+str(children))
		#----------------------
		# find a place : get core
		top_set = sets.Set()
		for top in self._overlay_all_top():
			object_description = self._semantic_graph.hashable_space_point(top[-2])
			object_overlay_node = top[-1]
			item = (object_description ,object_overlay_node)
			top_set.add(item)
			# lock data
			object_state = top[0]
			if states.has_key(object_description):
				state_and_node =  states[object_description]
				if state_and_node[0] !=  object_overlay_node:
					self.logger.error("vertex %s has been transfered to another node"%str(object_description))
					return None
				elif state_and_node[1] !=  object_state: 
					self.logger.error("vertex %s has changed during addition"%str(object_description))
					return None
			states[ object_description ]  = ( object_overlay_node , object_state )
			#----
		#------------------------------------------------------
		# planned topology changes, as ((point_tuple, owner_node), ...) pairs
		delete_links = []
		add_links = []
		#------------------------------------------------------
		# a child that currently sits in the core leaves the core: cut its
		# p2p links with the remaining top vertices
		for child in children:
			if child in top_set:
				# delete this node from top
				for top_node in top_set:
					if top_node == child: continue	
					delete_links.append(  (child,top_node) )
					delete_links.append(  (top_node,child) )
				top_set.remove(child)
		#------------------------------------------------------
		# find overlay node that will own the new semantic node	
		owner_node = self.id()
		if len(parents)+len(children) > 0:
			# the owner is the overlay node owning the most parents+children
			ranking = dict()
			for c in children:
				sp,on = c
				if not ranking.has_key(  on ): ranking[on] = 0
				ranking[on] += 1
			for p in parents:
				sp,on = p
				if not ranking.has_key(  on ): ranking[on] = 0
				ranking[on] += 1
			# keep only the nodes with the maximal count, pick one at random
			ranking_tab = [ (v,k) for k,v in ranking.items()]
			ranking_tab.sort()
			ranking = [ elem[1] for elem in ranking_tab if elem[0] == ranking_tab[-1][0] ]
			owner_node = ranking[ random.choice( range(len(ranking)) ) ]
		#####self.logger.info("_overlay_add_handle: overlay node owner="+str(owner_node))
		#------------------------------------------------------------
		new_node = ([x for x in space_point_copy],owner_node)
		#------------------------------------------------------------
		if len(parents) == 0: # add node in core
			for peer   in top_set: add_links.append( (new_node,peer,"p2p","p2p") )
		else: # node is not part of the core
			for parent in parents: add_links.append( (new_node,parent,"c2p","p2c") )
		# batch, per owner overlay node, the parent lookups of every child
		parents_of_children_o = dict()
		parents_of_children = dict()
		for child in children:
			r_child,over_node = child
			try:
				parents_of_children_o[over_node].append( r_child )
			except KeyError:	
				parents_of_children_o[over_node] = [r_child]
		# NOTE: 'parents' is re-bound below, clobbering the parents set built
		# above; only parents_overlay_nodes is still needed from that set
		for oo,dat in parents_of_children_o.items():
			self.logger.info("ask %s parents of %d nodes"%(str(oo),len(dat)))
			parents = self._send_getparents(oo,dat)
			#self.logger.info("%d parents received"%len(parents))
			if len(parents) != len(dat): raise Exception("aaah")
			# (dd is unused)
			dd = dict()
			for ii in range(len(dat)):	
				parents_of_children[ dat[ii] ] = parents[ii]
		# re-wire each child: when the new point sits between the child and
		# all of its containing parents, cut those links and attach the child
		# to the new point instead
		for child in children:
			parents_of_child_container_of_node = []
			parents_of_child_contained_by_node = []	
			r_child,over_node = child
			#parents_of_child = self._overlay_parents( r_child,over_node  )	
			parents_of_child = parents_of_children[r_child]
			#if len(parents_of_child) != len(parents_of_child_):
			#	self.logger.info(str(parents_of_child))
			#	self.logger.info(str(parents_of_child_))
			#	raise Exception("ahhh")
			#self.logger.info("child "+str(child)+" ----> its parents")
			for pchild in parents_of_child:
				r_pchild_dict,node = pchild[-2],pchild[-1]
				r_pchild = self._semantic_graph.hashable_space_point(r_pchild_dict)
				# prefer the owner recorded during the all_parents query
				if parents_overlay_nodes.has_key(r_pchild):
					node = parents_overlay_nodes[r_pchild]
				#  [r_pchild_dict[x] for x in poset_keys] 
				comp_value = compare(space_point,r_pchild)
				if comp_value  == 1:
					parents_of_child_container_of_node.append((r_pchild,node))
				elif comp_value == 2:
					parents_of_child_contained_by_node.append((r_pchild,node))
			if len(parents_of_child_contained_by_node) == 0:
				for pchild in parents_of_child_container_of_node:
					###self.logger.debug("LINK\tremove child to its parent "+str(child)+" "+str(pchild))
					delete_links.append( ( child,pchild ) )
				###self.logger.debug("LINK\tadd to child "+str(child)+" "+str(new_node))
				add_links.append( (child,new_node,"c2p","p2c") )
		# group planned updates per overlay node
		by_peer = dict() # node : [ add, ext, remove ]
		for link in add_links:
			s,t,r1,r2 = link	
			space_point1_t,onode1 = s 
			space_point2_t,onode2 = t
			# rebuild attribute dicts from the hashable tuples, following the
			# sorted poset key order used everywhere else
			space_point1  = dict()
			space_point2  = dict()
			xi = 0
			for attr in space_point1_t:
				space_point1[poset_keys[xi][-1]] = attr
				xi += 1
			xi = 0
			for attr in space_point2_t:
				space_point2[poset_keys[xi][-1]] = attr
				xi += 1
			if onode1 == onode2:
				if not by_peer.has_key(onode1): by_peer[onode1] = [ [], [] ,[] ]  
				by_peer[onode1][0].append( (space_point1,space_point2,r1,r2) )
			else:
				# external link: both endpoints' owners must learn about it
				if not by_peer.has_key(onode1): by_peer[onode1] = [ [], [] ,[] ]  
				by_peer[onode1][1].append( (onode2,space_point1,space_point2,r1,r2) )
				if not by_peer.has_key(onode2): by_peer[onode2] = [ [], [] ,[] ]  
				by_peer[onode2][1].append( (onode1,space_point2,space_point1,r2,r1) )
		for link in delete_links:
			s,t = link	
			space_point1,onode1 = s 
			space_point2,onode2 = t
			#####self.logger.info("_overlay_add_handle: program link deletion "+str( (space_point1,onode1) ) + " -> "+str( (space_point2,onode2)))
			# deletions are recorded in both directions on every owner involved
			if onode1 == onode2:
				if not by_peer.has_key(onode1): by_peer[onode1] = [ [], [] ,[] ]  
				by_peer[onode1][2].append( (space_point1,space_point2) )
				by_peer[onode1][2].append( (space_point2,space_point1) )
			else:
				if not by_peer.has_key(onode1): by_peer[onode1] = [ [], [] ,[] ]  
				by_peer[onode1][2].append( (space_point1,space_point2) )
				by_peer[onode1][2].append( (space_point2,space_point1) )
				if not by_peer.has_key(onode2): by_peer[onode2] = [ [], [] ,[] ]  
				by_peer[onode2][2].append( (space_point2,space_point1) )
				by_peer[onode2][2].append( (space_point1,space_point2) )
		### lock data
		##if not to_lock.has_key( child[1] ): to_lock[child[1]] = sets.Set()
		##to_lock[child[1]].add(child[0] )
		###----

		# TRY TO LOCK :)
		# to_unlock: peer -> {hashable point: state} for each lock taken
		to_unlock = dict()
		everything_is_locked = True
		try:
			hspace_point = self._semantic_graph.hashable_space_point(space_point)
			#pprint(by_peer)
			#pprint(states)
			# collect, per peer, the (point, state) pairs involved in its
			# updates; states[...] raises KeyError for a vertex never seen in
			# the queries above, which is caught below as a lock failure
			for peer, tables in by_peer.items():
				space_points_and_states = dict()
				for internal_link in tables[0]:
					a = self._semantic_graph.hashable_space_point(internal_link[0])
					if a != hspace_point and not space_points_and_states.has_key(a): space_points_and_states[a] = states[a]
					b = self._semantic_graph.hashable_space_point(internal_link[1])
					if b != hspace_point and not space_points_and_states.has_key(b): space_points_and_states[b] = states[b]
				for external_link in tables[1]:
					a = self._semantic_graph.hashable_space_point(external_link[1])
					if a != hspace_point and not space_points_and_states.has_key(a): space_points_and_states[a] = states[a]
					b = self._semantic_graph.hashable_space_point(external_link[2])
					if b != hspace_point and not space_points_and_states.has_key(b): space_points_and_states[b] = states[b]
				for internal_link in tables[2]:
					a = self._semantic_graph.hashable_space_point(internal_link[0])
					if a != hspace_point and  not space_points_and_states.has_key(a): space_points_and_states[a] = states[a]
					b = self._semantic_graph.hashable_space_point(internal_link[1])
					if b != hspace_point and not space_points_and_states.has_key(b): space_points_and_states[b] = states[b]
				# send lock
				lock_result = self.send_lock( peer, space_points_and_states)
				#####self.logger.info("Lock result on "+str(peer)+": "+str(lock_result))
				if not lock_result: raise Exception('one node from '+str(peer)+' was not successfully locked!')
				to_unlock[peer] = space_points_and_states
				# 
		except Exception,e:
			everything_is_locked = False
			# Exception -> cancel lock, wait for a random moment and exit with false ret code
			self.logger.error("error while locking : begin cancel operation")
			from traceback import print_exc
			print_exc()
			# now unlock (in fact cancel!)!
			for peer, space_points_and_states in to_unlock.items():
				try:
					self.send_unlock(peer, space_points_and_states.keys(), True)
				except Exception,e:
					self.logger.error("########## serious error occured while canceling locking operation")  
		#----------------------------------------------------------------------------
		if not everything_is_locked: return None
		# everything is locked: create the vertex, apply the updates, unlock
		self._send_add_handle(new_node[1],space_point,handle)	
		for peer, tables in by_peer.items():
			#####self.logger.info("_overlay_add_handle: send update_topology to "+str(peer)+" : internal links="+str(tables[0]))
			#####self.logger.info("_overlay_add_handle: send update_topology to "+str(peer)+" : external links="+str(tables[1]))
			#####self.logger.info("_overlay_add_handle: send update_topology to "+str(peer)+" : delete links="+str(tables[2]))
			self._send_update_topology(peer,tables[0],tables[1],tables[2])
		for peer, space_points_and_states in to_unlock.items():
			self.send_unlock(peer, space_points_and_states.keys(), False)
		return new_node[1]

	def remove_data(self,data_description,handle_to_data):
		""" Remove a data item from the Posnet network (not implemented yet). """
		pass # TODO

#------------------------------------------------
# Overlay Network methods for indexing data
#-------------------------------------
#------------------------------------------------------	
# Link management
# configure, receive/send open and close
#----------------------------------------------------
	def receive_close_link(self,message_source,message_data,client_socket,answer_type):
		""" Handle a close-link request: forget space_point in the edge sets
		shared with the sender, then acknowledge with an empty reply. """
		neighbor_in_overlay_id = message_source
		space_point = message_data
		#####self.logger.info("receive_close_link: from neighbor="+str(neighbor_in_overlay_id)+", space_point="+str(space_point))
		# 'in' replaces the deprecated has_key()
		# NOTE(review): .remove raises KeyError when space_point is not in the
		# set -- presumably the sender only closes links it opened; confirm
		if neighbor_in_overlay_id in self._overlay_in_edges:
			self._overlay_in_edges[neighbor_in_overlay_id].remove(space_point)
		if neighbor_in_overlay_id in self._overlay_out_edges:
			self._overlay_out_edges[neighbor_in_overlay_id].remove(space_point)
		self._reply_message(None,client_socket,answer_type)

	def receive_tunnel(self,message_source, message_data,client_socket,answer_type):
		""" Handle a tunnel query: connect the underlay tunnel endpoint and
		acknowledge with a tunnel_reply message. """
		self.logger.info("tunnel message received from %s (data=%s)"%(str(message_source),str(message_data)))
		self._underlay.connect_tunnel_second(message_data,message_source)
		reply = posnet_tunnel_reply_message(self.id(), message_source, None)
		self._reply_message(reply,client_socket,answer_type)

	def receive_tunnel_reply(self,message_source, message_data,client_socket,answer_type):
		""" Handle a tunnel_reply query: log it and send back an empty reply. """
		self.logger.info("tunnel_reply message received from %s"%str(message_source))
		self._reply_message(None,client_socket,answer_type)


	def receive_open_link(self,message_source,message_data,client_socket,answer_type):
		""" Handle an open-link request.

		Registers each requested space point as an incoming edge from the
		sender and confirms every newly opened link with a link_opened
		message; already-registered points get an empty reply. A failure
		rolls back the registration and re-raises.
		"""
		neighbor_in_overlay_id = message_source
		space_points = message_data.space_points
		if neighbor_in_overlay_id not in self._overlay_in_edges:  # avoid the .keys() list scan
			self._overlay_in_edges[neighbor_in_overlay_id] = sets.Set()
		for space_point in space_points:
			if space_point not in self._overlay_in_edges[neighbor_in_overlay_id]:
				self._overlay_in_edges[neighbor_in_overlay_id].add(space_point)
				try:
					# open link with underlay
					## TODO add_session_success =  self._underlay.add_listening_socket(message_data.requester)
					# send confirmation
					m = posnet_link_opened_message(self.id(),neighbor_in_overlay_id,space_point)
					self.logger.info("receive_open_link: send link_opened to "+str(neighbor_in_overlay_id))
					close_connection = True
					## TODO if add_session_success: close_connection = False
					self._reply_message(m,client_socket,answer_type,None,close_connection)
				except Exception as e:
					self.logger.error("receive_open_link: error! " +str(e))
					print_exc()  # already imported at module level
					# roll back the registration done above
					self._overlay_in_edges[neighbor_in_overlay_id].remove(space_point)
					if len(self._overlay_in_edges[neighbor_in_overlay_id]) == 0:
						del self._overlay_in_edges[neighbor_in_overlay_id]
					self._reply_message(None,client_socket,answer_type)
					#---
					raise # TODO treat errors !
			else:
				# link already configured: just acknowledge, keep connection
				self._reply_message(None,client_socket,answer_type,None,False)

	def configure_link(self,overlay_node_destination_id,space_point):
		""" Open a link to a remote overlay node for space_point and record
		it as an outgoing edge of this node. """
		hspace_point = self._semantic_graph.hashable_space_point(space_point)
		# TODO => check the answer (socket opened?) instead of assuming success
		self.send_open_link(overlay_node_destination_id,[hspace_point])
		neighbor_in_overlay_id = overlay_node_destination_id
		if neighbor_in_overlay_id not in self._overlay_out_edges:  # avoid the .keys() list scan
			self._overlay_out_edges[neighbor_in_overlay_id] = sets.Set()
		# Set.add is idempotent, so no membership pre-check is needed
		self._overlay_out_edges[neighbor_in_overlay_id].add(hspace_point)
		#####self.logger.info("configure_link: ok "+str(self.id())+" <--> "+str(overlay_node_destination_id)+" / space_point="+str(space_point))

	def send_open_link(self,overlay_node_destination_id,space_points):
		""" Build and send an open_link request; return the remote answer. """
		msg = posnet_open_link_message(self.id(), overlay_node_destination_id, space_points, self.id())
		return self.send(msg)

	def send_close_link(self,overlay_node_destination_id,space_point):
		""" Build and send a close_link message (answer ignored). """
		msg = posnet_close_link_message(self.id(), overlay_node_destination_id, space_point)
		self.send(msg)

#------------------------------------------------
# send/receive join and split
#------------------------------------------------
	def _retry_pending_join(self):
		""" Pop one pending join request (if any) and retry it.
		Errors are logged and swallowed, never propagated. """
		try:
			if len(self._join_pending_nodes) > 0:
				node = self._join_pending_nodes.pop()
				# the original message said "split" but this retries a join
				self.logger.info("retry join to node %s"%str(node))
				self._do_join(node)
		except Exception as e:
			self.logger.error("post-update_topology: contact pending nodes -> error: %s"%e)
	
	def _do_join(self, neighbor_in_overlay_id):
		""" Serve a join request from neighbor_in_overlay_id.

		Picks (uniformly at random) one of the overlay nodes owning the
		largest number of top (core) vertices and asks it to split toward
		the joining node. An empty overlay is reported back to the joiner
		with a join_reply carrying an empty string.
		"""
		# count how many top vertices each overlay node owns
		overlay_top = dict()
		for top in self._overlay_all_top():
			owner_id = top[-1]  # renamed from 'id': do not shadow the builtin
			overlay_top[owner_id] = overlay_top.get(owner_id, 0) + 1
		# ------------------
		if len(overlay_top) == 0:
			# the overlay is empty: tell the requester the join failed
			self.logger.error("receive_join: cannot split myself  [ :( ] = i'm empty")
			#self._join_pending_nodes.add( neighbor_in_overlay_id)
			message = posnet_join_reply_message(self.id(),neighbor_in_overlay_id,(self.id(),""))
			self.send(message)
		else:
			tab = [ (num,node_id) for node_id,num in overlay_top.items()]
			tab.sort()
			tab.reverse()
			# keep every node tied for the maximal count, choose one at random
			max_top_size = tab[0][0]
			to_choose = [node_id for num,node_id in tab if num == max_top_size]
			chosen = random.choice(to_choose)
			#####self.logger.info("receive_join: send split_message ->"+str(neighbor_in_overlay_id)+" (choosed from="+str(to_choose)+")")
			self.send_split(chosen,neighbor_in_overlay_id)
	
	def receive_join_reply(self,message_source, message_data,client_socket,answer_type):
		""" Handle the answer to a join request.

		message_data is (join_entry_point, reply_msg); an empty reply_msg
		means the join failed and is retried from a background thread after
		a random delay (5 to 60 seconds).
		"""
		# close the client connection that answered for the join
		self._reply_message(None,client_socket,answer_type)
		# -----------------
		join_entry_point,reply_msg = message_data
		if reply_msg == "":
			self.logger.error("join failed, retry in 5 seconds!")
			# note: retry arguments are passed explicitly (the original took an
			# unused 'message_source' parameter and relied on the closure)
			def threaded_job(node,entry_point):
				try:
					sleep_time = random.choice([5,10,15,20,30,60])
					node.logger.info("sleep %s before retry join to %s"%(str(sleep_time),str(entry_point)))
					sleep(sleep_time)
					node.logger.info("retry join to %s"%str(entry_point))
					node.send_join( entry_point )
				except Exception as e:
					node.logger.error("an error has occured: %s"%str(e))
			thread.start_new_thread(threaded_job,(self,join_entry_point))
		else:
			self.logger.info("node %s has accepted join "%str(message_data))

		
	def receive_join(self,message_source, message_data,client_socket,answer_type):
		""" Handle a join request: acknowledge the sender, then serve the
		join for the node carried in message_data. """
		# close the client connection first
		self._reply_message(None,client_socket,answer_type)
		joining_node = message_data
		self._do_join(joining_node)
				
	def receive_initialize(self,message_source, message_data,client_socket,answer_type):
		""" Handle an initialize message: acknowledge first, then load the
		carried graph data locally; errors are logged, not propagated. """
		self._reply_message(None,client_socket,answer_type)
		try:
			self._local_initialize(message_data)
		except Exception as e:
			self.logger.error("error while initializing: %s"%str(e))
			print_exc()
	def _local_initialize(self,initialize_data):
		""" Load a serialized semantic graph into this node.

		@param initialize_data: dict with mandatory key "graph"
		       ({(x,y): (relation, relation_sym)}) and optional keys
		       "handles" ({point: [handle,...]}) and "states"
		       ({point: state}).
		Every created vertex is locked during insertion and unlocked at the
		end. Each undirected link is inserted only once (both orientations
		are recorded in the 'links' set).
		@return: None
		"""
		graph = initialize_data["graph"]
		handles = initialize_data.get("handles", dict())
		states = initialize_data.get("states", dict())
		nodes = sets.Set()
		links = sets.Set()
		for link,link_rel in graph.items():
			x,y = link
			relation,relation_sym = link_rel
			# create each endpoint once (the x and y handling was duplicated
			# verbatim in the original; factored into a helper)
			for point in (x, y):
				if point not in nodes:
					self._local_initialize_vertex(point, states, handles)
					nodes.add(point)
			if (x,y) not in links:
				# insert link
				self._local_add_semantic_edge(x,y,relation,relation_sym)
				# remember both orientations so the symmetric entry is skipped
				links.add((x,y))
				links.add((y,x))
		# unlock every vertex created above
		for n in nodes:
			self._local_unlock(n,True)
		# OK :)
		return None

	def _local_initialize_vertex(self, point, states, handles):
		""" Create one semantic vertex with its handles and lock it. """
		# default missing entries, as the original did with try/except KeyError
		states.setdefault(point, 0)
		handles.setdefault(point, [""])
		# add node and handles
		vertex_id = self._local_add_semantic_node(point)
		self._local_add_handles(handles[point],vertex_id)
		# NOTE(review): the vertex is always locked with state 0 -- the entry
		# just defaulted in 'states' is never read here; confirm whether
		# states[point] was intended instead
		self._local_lock(point,0)


	def receive_better_split(self,message_source, message_data,client_socket,answer_type):
		self._reply_message(None,client_socket,answer_type)
		# try one neighbor
		neighbor_in_overlay_id = message_data
		self.logger.info("examine better split for node %s"%str(neighbor_in_overlay_id))
		try:
			#subgraph1,subgraph2 = self._semantic_graph.split()
			subgraph1 = self._semantic_graph.nodes() # MIIK
			subgraph2 = sets.Set()
			for node,neighbor in  self._external.items():
				pass
			#----------------------------
			if len(subgraph2) == 0 :
				raise Exception("no better split found")
			#--------------------------------------------------------------------------------------------------------------
			# 1. determine lock nodes and updates
			my_nodes_to_lock = dict() # key=nodes, value=states
			overlay_node_to_point_states_needed = dict()  # key = overlay node, values = space_points
			#my_states = self.send_state_request(self.id(),[self._semantic_graph.hashable_space_point(self._semantic_graph.space_point(node)) for node in subgraph2])

			# 1.0 request states
			all_states = dict()
			my_states =  self.send_state_request(self.id(),None)
			### keys = points (format hashable, i.e. (p1,...,pk) )     values = (state,locked)

			# 1.1 (subgraph2 is for neighbor)
			for node in subgraph2:
				if not overlay_node_to_point_states_needed.has_key( self.id()):
					overlay_node_to_point_states_needed[self.id()] = sets.Set()
				overlay_node_to_point_states_needed[self.id()].add(   self._semantic_graph.hashable_space_point(self._semantic_graph.space_point(node)) )
			#----------------------------------------
			# 1.2 compute update_topology (record nodes to lock: border semantic nodes) 
			update_links = []
			update_ext = []	
			rem_links = []
			cur_ext = []
			overlay_node_to_updates = dict() 

			for node in subgraph2:
				space_point = self._semantic_graph.space_point(node)
				for node2 in self._semantic_graph.successors(node):
					space_point2 = self._semantic_graph.space_point(node2)
					relation = self._semantic_graph.relation(node,node2)
					relation_sym = Relations_sym[relation]
					# neighboring point belongs to another node
					if self._external.has_key(node2):
						n_overlay_id = self._external[node2]
						#--
						if not overlay_node_to_point_states_needed.has_key(n_overlay_id):
							overlay_node_to_point_states_needed[n_overlay_id] = sets.Set()	
						overlay_node_to_point_states_needed[n_overlay_id].add( self._semantic_graph.hashable_space_point(space_point2) )
						#--
						update_ext.append( (n_overlay_id,space_point,space_point2,relation,relation_sym) )
						rem_links.append( (space_point,space_point2) )
						rem_links.append( (space_point2,space_point) )
						if not overlay_node_to_updates.has_key(n_overlay_id):
							overlay_node_to_updates[n_overlay_id] = []
						overlay_node_to_updates[n_overlay_id].append( (neighbor_in_overlay_id,space_point2,space_point,relation_sym,relation)  )
					# neighboring point belongs to self overlay node
					else:
						# internal to new overlay node
						if node2 in subgraph2:
							update_links.append ( ( space_point,space_point2,relation,relation_sym)  )
							rem_links.append( (space_point,space_point2) )
							rem_links.append( (space_point2,space_point) )
						# from self to new overlay node
						else:
							#--
							overlay_node_to_point_states_needed[self.id()].add( self._semantic_graph.hashable_space_point(space_point2) )
							#--
							update_ext.append( (self.id(),space_point,space_point2,relation,relation_sym) )
							cur_ext.append( ( neighbor_in_overlay_id,space_point2,space_point,relation_sym,relation))
							rem_links.append( (space_point,space_point2) )
			# 1.3 ask for neighor states
			for node,set_of_points in overlay_node_to_point_states_needed.items():
				if node == self.id():
					# restrict states for self to touched nodes
					dico = dict()	
					for po in set_of_points:
						dico[po] = my_states[po]
					all_states[self.id()] = dico
					# del my_states
				else:
					the_states =  self.send_state_request(self.id(),[snode for snode in set_of_points])
					all_states[node] = the_states
	
			# 1.4 lock!
			# TRY TO LOCK :)
			to_unlock = dict()
			everything_is_locked = True
			try:
				# request states and then lock
				for peer, space_points_and_states in all_states.items():
					# send lock
					lock_result = self.send_lock( peer, space_points_and_states)
					if not lock_result: raise Exception('one node from %s was not successfully locked'%str(peer))
					to_unlock[peer] = space_points_and_states
					#####self.logger.info("Lock result on "+str(peer)+": "+str(lock_result))
					# 
			except Exception,e:
				everything_is_locked = False
				# Exception -> cancel lock, wait for a random moment and exit with false ret code
				self.logger.error("split: error(%s), begin cancel operation"%str(e))
				# now unlock (in fact cancel!)!
				for peer, space_points_and_states in to_unlock.items():
					try:
						self.send_unlock(peer, space_points_and_states.keys(), True)
					except Exception,e:
						self.logger.error("########## serious error occured while canceling locking operation")  
						from traceback import print_exc
						print_exc()
			#----------------------------------------------------------------------------
			if not everything_is_locked:
				# TODO handle error: print message? 
				self.logger.error("Unable to  split because everything is not locked")
				return None
			# open tunnel -> DOES NOT WORK BECAUSE ARE  ASYNCHRONUS	-> SYNC MESSAGES ARE NOT WORKING= lock... !!!
			#tunnel_create_address = self._underlay.create_tunnel_first(neighbor_in_overlay_id)
			#self.logger.info("tunnel preparation: %s"%str(tunnel_create_address))
			#tunnel_msg = posnet_tunnel_message(self.id(),neighbor_in_overlay_id,tunnel_create_address)
			#self.send(tunnel_msg)	
			#---------------------------------------
			# 2. add delegated nodes to neighbor
			neighbor_space_points_and_states = dict()
			for node in subgraph2:
				handles = [handle for handle in self._nodes_to_handles[node]]
				space_point = self._semantic_graph.space_point(node)
				# add handles
				self._send_add_handle(neighbor_in_overlay_id,space_point,handles[0])
				for handle in handles[1:]:	
					self._send_add_handle(neighbor_in_overlay_id,space_point,handle)
				neighbor_space_points_and_states[self._semantic_graph.hashable_space_point(space_point)] = 0
			# send lock
			lock_result = self.send_lock( neighbor_in_overlay_id, neighbor_space_points_and_states)
			if not lock_result: raise Exception('neighbor node %s was not successfully locked'%str(neighbor_in_overlay_id))
			to_unlock[neighbor_in_overlay_id] = neighbor_space_points_and_states
			#----------------------------------------
			# 3. send update topology to neighbor
			self._send_update_topology(neighbor_in_overlay_id,update_links,update_ext,[])	
			#----------------------------------------
			# 3. send update topology to all other concerned nodes
			for n_overlay_id,tab in overlay_node_to_updates.items():	
				self._send_update_topology(n_overlay_id,[],tab,[])
			#----------------------------
			#  finish: unlink current node
			self._send_update_topology(self.id(),[],cur_ext,rem_links)	
			for peer, space_points_and_states in to_unlock.items():
				try:
					self.send_unlock(peer, space_points_and_states.keys(), False)
				except Exception,e:
					self.logger.error("post-split: error during unlock of %s (%s)"% ( str(peer),str(e),)) 
			#----------------------------
			# finish: remove delegated nodes
			for node in subgraph2:
				space_point = self._semantic_graph.space_point(node)
				handles = [handle for handle in self._nodes_to_handles[node]]	
				if not self._external.has_key(node):
					for handle in handles:	
						self._local_remove_semantic_node(space_point,handle)		
			#-------------------------------
			# tunnel creation
			try:
				pass
				# TODO
				#tunnel_create_address = self._underlay.create_tunnel_first(neighbor_in_overlay_id)
				#self.logger.info("tunnel preparation: %s"%str(tunnel_create_address))
				#tunnel_msg = posnet_tunnel_message(self.id(),neighbor_in_overlay_id,tunnel_create_address)
				#self.send(tunnel_msg)	
			except Exception,e:
				self.logger.error("tunnel creation failed! (%s)"%str(e)) 
			#
		except Exception,e:
			self.logger.error("receive_split: unable to split now, %s"%e)
			#self._join_pending_nodes.add(neighbor_in_overlay_id)




	def receive_split(self,message_source, message_data,client_socket,answer_type):
		""" handle an incoming split request: carve off part of the local
		    semantic graph and delegate it to another overlay node.
		    @param message_source: overlay id of the message sender
		    @param message_data: overlay id of the node that will receive the
		                         delegated subgraph
		    @param client_socket: socket of the incoming connection
		    @param answer_type: reply mode expected by the messaging layer
		    Outline: reply immediately (the split runs asynchronously), split
		    the graph in two, lock every touched semantic node here and on the
		    concerned peer overlay nodes, push handles and topology updates to
		    the new node, then unlock and drop the delegated nodes locally.
		    Any failure is logged and the split is abandoned (never raised).
		"""
		# reply first so the requester is not kept waiting during the split
		self._reply_message(None,client_socket,answer_type)
		try:
			neighbor_in_overlay_id = message_data
			self.logger.info("begin split for new node %s"%str(neighbor_in_overlay_id))
			subgraph1,subgraph2 = self._semantic_graph.split()
			if len(subgraph2) == 0 :
				raise Exception("semantic graph was not splittable (empty second part returned by graph  split procedure")
			#--------------------------------------------------------------------------------------------------------------
			# 1. determine lock nodes and updates
			my_nodes_to_lock = dict() # key=nodes, value=states
			overlay_node_to_point_states_needed = dict()  # key = overlay node, values = space_points

			# 1.0 request states (of every local point; restricted later in 1.3)
			all_states = dict()
			my_states =  self.send_state_request(self.id(),None)
			### keys = points (format hashable, i.e. (p1,...,pk) )     values = (state,locked)

			# 1.1 every point delegated to the neighbor (subgraph2) must be locked on self
			for node in subgraph2:
				if not overlay_node_to_point_states_needed.has_key( self.id()):
					overlay_node_to_point_states_needed[self.id()] = sets.Set()
				overlay_node_to_point_states_needed[self.id()].add(   self._semantic_graph.hashable_space_point(self._semantic_graph.space_point(node)) )
			#----------------------------------------
			# 1.2 compute update_topology (record nodes to lock: border semantic nodes) 
			update_links = []  # internal links of the delegated subgraph
			update_ext = []	   # links that become external for the neighbor
			rem_links = []     # links to remove locally afterwards
			cur_ext = []       # links from self toward the new overlay node
			overlay_node_to_updates = dict() 

			for node in subgraph2:
				space_point = self._semantic_graph.space_point(node)
				for node2 in self._semantic_graph.successors(node):
					space_point2 = self._semantic_graph.space_point(node2)
					relation = self._semantic_graph.relation(node,node2)
					relation_sym = Relations_sym[relation]
					# neighboring point belongs to another overlay node
					if self._external.has_key(node2):
						n_overlay_id = self._external[node2]
						# that owner's point must be locked too
						if not overlay_node_to_point_states_needed.has_key(n_overlay_id):
							overlay_node_to_point_states_needed[n_overlay_id] = sets.Set()	
						overlay_node_to_point_states_needed[n_overlay_id].add( self._semantic_graph.hashable_space_point(space_point2) )
						#--
						update_ext.append( (n_overlay_id,space_point,space_point2,relation,relation_sym) )
						rem_links.append( (space_point,space_point2) )
						rem_links.append( (space_point2,space_point) )
						if not overlay_node_to_updates.has_key(n_overlay_id):
							overlay_node_to_updates[n_overlay_id] = []
						overlay_node_to_updates[n_overlay_id].append( (neighbor_in_overlay_id,space_point2,space_point,relation_sym,relation)  )
					# neighboring point belongs to self overlay node
					else:
						# internal to new overlay node
						if node2 in subgraph2:
							update_links.append ( ( space_point,space_point2,relation,relation_sym)  )
							rem_links.append( (space_point,space_point2) )
							rem_links.append( (space_point2,space_point) )
						# from self to new overlay node
						else:
							#--
							overlay_node_to_point_states_needed[self.id()].add( self._semantic_graph.hashable_space_point(space_point2) )
							#--
							update_ext.append( (self.id(),space_point,space_point2,relation,relation_sym) )
							cur_ext.append( ( neighbor_in_overlay_id,space_point2,space_point,relation_sym,relation))
							rem_links.append( (space_point,space_point2) )
			# 1.3 ask for neighbor states
			for node,set_of_points in overlay_node_to_point_states_needed.items():
				if node == self.id():
					# restrict states for self to touched nodes
					dico = dict()	
					for po in set_of_points:
						dico[po] = my_states[po]
					all_states[self.id()] = dico
				else:
					the_states =  self.send_state_request(self.id(),[snode for snode in set_of_points])
					all_states[node] = the_states
	
			# 1.4 lock!  (all-or-nothing: any failure cancels every lock taken)
			to_unlock = dict()
			everything_is_locked = True
			try:
				# request states and then lock
				for peer, space_points_and_states in all_states.items():
					# send lock
					lock_result = self.send_lock( peer, space_points_and_states)
					if not lock_result: raise Exception('one node from %s was not successfully locked'%str(peer))
					to_unlock[peer] = space_points_and_states
					# 
			except Exception,e:
				everything_is_locked = False
				# Exception -> cancel lock, wait for a random moment and exit with false ret code
				self.logger.error("split: error(%s), begin cancel operation"%str(e))
				# now unlock (in fact cancel!)!
				for peer, space_points_and_states in to_unlock.items():
					try:
						self.send_unlock(peer, space_points_and_states.keys(), True)
					except Exception,e:
						self.logger.error("########## serious error occured while canceling locking operation")  
						from traceback import print_exc
						print_exc()
			#----------------------------------------------------------------------------
			if not everything_is_locked:
				# TODO handle error: print message? 
				self.logger.error("Unable to  split because everything is not locked")
				return None
			# open tunnel -> DOES NOT WORK BECAUSE ARE  ASYNCHRONUS	-> SYNC MESSAGES ARE NOT WORKING= lock... !!!
			#tunnel_create_address = self._underlay.create_tunnel_first(neighbor_in_overlay_id)
			#self.logger.info("tunnel preparation: %s"%str(tunnel_create_address))
			#tunnel_msg = posnet_tunnel_message(self.id(),neighbor_in_overlay_id,tunnel_create_address)
			#self.send(tunnel_msg)	
			#---------------------------------------
			# 2. add delegated nodes to neighbor
			neighbor_space_points_and_states = dict()
			for node in subgraph2:
				handles = [handle for handle in self._nodes_to_handles[node]]
				space_point = self._semantic_graph.space_point(node)
				# add handles
				self._send_add_handle(neighbor_in_overlay_id,space_point,handles[0])
				for handle in handles[1:]:	
					self._send_add_handle(neighbor_in_overlay_id,space_point,handle)
				# delegated points start on the neighbor with state (version) 0
				neighbor_space_points_and_states[self._semantic_graph.hashable_space_point(space_point)] = 0
			# send lock
			lock_result = self.send_lock( neighbor_in_overlay_id, neighbor_space_points_and_states)
			if not lock_result: raise Exception('neighbor node %s was not successfully locked'%str(neighbor_in_overlay_id))
			to_unlock[neighbor_in_overlay_id] = neighbor_space_points_and_states
			#----------------------------------------
			# 3. send update topology to neighbor
			self._send_update_topology(neighbor_in_overlay_id,update_links,update_ext,[])	
			#----------------------------------------
			# 3. send update topology to all other concerned nodes
			for n_overlay_id,tab in overlay_node_to_updates.items():	
				self._send_update_topology(n_overlay_id,[],tab,[])
			#----------------------------
			#  finish: unlink current node
			self._send_update_topology(self.id(),[],cur_ext,rem_links)	
			for peer, space_points_and_states in to_unlock.items():
				try:
					self.send_unlock(peer, space_points_and_states.keys(), False)
				except Exception,e:
					self.logger.error("post-split: error during unlock of %s (%s)"% ( str(peer),str(e),)) 
			#----------------------------
			# finish: remove delegated nodes (now owned by the neighbor)
			for node in subgraph2:
				space_point = self._semantic_graph.space_point(node)
				handles = [handle for handle in self._nodes_to_handles[node]]	
				if not self._external.has_key(node):
					for handle in handles:	
						self._local_remove_semantic_node(space_point,handle)		
			#-------------------------------
			# tunnel creation
			try:
				pass
				# TODO
				#tunnel_create_address = self._underlay.create_tunnel_first(neighbor_in_overlay_id)
				#self.logger.info("tunnel preparation: %s"%str(tunnel_create_address))
				#tunnel_msg = posnet_tunnel_message(self.id(),neighbor_in_overlay_id,tunnel_create_address)
				#self.send(tunnel_msg)	
			except Exception,e:
				self.logger.error("tunnel creation failed! (%s)"%str(e)) 
			#
		except Exception,e:
			self.logger.error("receive_split: unable to split now, %s"%e)
			#self._join_pending_nodes.add(neighbor_in_overlay_id)


	def send_join(self,overlay_node_destination_id):
		""" ask overlay node *overlay_node_destination_id* to let this node join the overlay """
		join_msg = posnet_join_message(self.id(),overlay_node_destination_id,self.id())
		self.send(join_msg)
	def send_split(self,overlay_node_destination_id,requester_id):
		""" ask overlay node *overlay_node_destination_id* to split its
		    semantic graph on behalf of *requester_id* """
		split_msg = posnet_split_message(self.id(),overlay_node_destination_id,requester_id)
		self.send(split_msg)

#--------------------------------------------------
# close TODO !
#-----------------------------------------------
	def close(self):
		""" proper close this node -> delegate data to other nodes of the overlay"""
		print >>sys.stderr,"TODO: node",self,"closing"
#		if len(self._overlay_edges.keys()) == 0: return
#		overlay_index_alive = self._overlay_edges.keys()
#		while len(overlay_index_alive) > 0:
#			neighbor = self._overlay_edges[random.choice(range(len(overlay_index_alive)))]
#			self.send_store(neighbor,self_data)
#			del overlay_index_alive[neighbor_index]
#			for neighbor_id in self._overlay_edges.keys(): self.send_close_link(neighbor_id)
#			break
#		if len(overlay_index_alive) == 0:
#			raise Exception("unable to properly close")
		print "node",self,"closed"

	def __del__(self):
		""" destructor: make sure the node is properly closed before collection """
		self.close()

#-------------------------------------------------------------------------------
# Local Modifications on handles stored
#-------------------------------------------------------------------------------
##	def send_store(self,overlay_node_destination_id,store_message):
##		message = posnet_store_message(self.id(),overlay_node_destination_id,store_message) 
##		self.send(message)
##
##	def receive_store(self,store_message):
##		#print "\t* store_message received at ",self
##		store_data = store_message.data
##		# treat store
##		# store message format  desc,handle,neighbors
##		# neighbors = [ (space_point,node_network) ]
##		#
##		# 1. add node to internal graph
##		space_point = store_data[0]
##		node_handle = store_data[1]
##		node_neighbors = store_data[2:]
##		#self.add_data[store_data[0]
##		for dat in delegation_data:
##			data_desc,data_handle = dat
##			self.add_data(data_desc,data_handle)
	def receive_add_handle(self,message_source, add_handle_message ,client_socket,answer_type):
		""" handle an incoming add_handle message: register the handle in the
		    local semantic graph, then acknowledge with an empty reply so the
		    incoming connection is closed """
		self._receive_add_handle(add_handle_message)
		self._reply_message(None,client_socket,answer_type)
		
	def _receive_add_handle(self,add_handle_message):
		""" apply an add_handle payload to the local semantic graph """
		self._local_add_semantic_node(add_handle_message.space_point,
		                              add_handle_message.handle)

	def send_add_handle(self,overlay_node_destination_id,add_handle_message):
		""" wrap an add_handle payload into a message and send it """
		self.send(posnet_add_handle_message(self.id(),overlay_node_destination_id,add_handle_message))
	def _send_add_handle(self,destination,space_point,handle):
		""" deliver an add_handle payload, short-circuiting the network when
		    the destination is this very node """
		payload = posnet_add_handle(space_point,handle)
		if destination == self.id():
			self._receive_add_handle(payload)
			return
		self.send_add_handle(destination,payload)

	def _local_add_handle(self,handle_to_data,semantic_node):
		""" register one data handle under *semantic_node* on this node """
		# TODO lock?
		try:
			self._nodes_to_handles[semantic_node].add(handle_to_data)
		except KeyError:
			self._nodes_to_handles[semantic_node] = sets.Set([handle_to_data])
	def _local_add_handles(self,handle_table,semantic_node):
		""" add handles to this node """
		# TODO lock?
		updated_set  = sets.Set()
		if self._nodes_to_handles.has_key(semantic_node): updated_set  =  self._nodes_to_handles[semantic_node] = sets.Set()
		for handle_to_data in handle_table: updated_set.add(handle_to_data)
		self._nodes_to_handles[semantic_node] = updated_set

	def _local_remove_handle(self,handle_to_data,semantic_node):
		""" forget one data handle of *semantic_node*; the whole entry is
		    dropped when no handle remains.  Unknown node or handle is
		    silently ignored. """
		# TODO lock?
		handles = self._nodes_to_handles.get(semantic_node)
		if handles is None or handle_to_data not in handles:
			return
		handles.remove(handle_to_data)
		if not handles:
			del self._nodes_to_handles[semantic_node]
		
	def _local_clear_handles(self,semantic_node):
		""" drop every handle registered under *semantic_node* (no-op when
		    the node has none) """
		# TODO lock?
		try:
			del self._nodes_to_handles[semantic_node]
		except KeyError:
			pass

#-------------------------------------------------------------------------------
# send/receive update topology
# Local Modifications on the local semantic graph of this node
# Add/remove for nodes and edges (including external edges)
#-------------------------------------------------------------------------------
	def send_update_topology(self,overlay_node_destination_id,update_topology_message):
		""" wrap a topology-update payload into a message and send it """
		self.send(posnet_update_topology_message(self.id(),overlay_node_destination_id,update_topology_message))

	def _receive_update_topology(self,update_topology_message):
		""" apply a topology update to the local semantic graph.
		    @param update_topology_message: carries three change lists:
		        .add    = [(source,target,relation,relation_sym)] internal edges
		        .ext    = [(overlay_node,source,target,relation,relation_sym)] external edges
		        .remove = [(source,target)] edges to delete
		    After applying the changes, external semantic nodes left without
		    any predecessor are garbage-collected (both the graph vertex and
		    its _external ownership entry are removed).
		"""
		for link in update_topology_message.add:
			source,target,relation,relation_sym  = link
			self._local_add_semantic_edge(source,target,relation,relation_sym)	
		for link in update_topology_message.ext:
			over_node,source,target,relation,relation_sym  = link
			self._local_add_semantic_edge_external(over_node,source,target,relation,relation_sym)	
		for link in update_topology_message.remove:
			source,target = link
			self._local_remove_semantic_edge(source,target)
		# garbage-collect external nodes that lost their last incoming edge;
		# collect first, then delete, so the dict is not mutated while iterated
		toremove = sets.Set()
		for node,nei in self._external.items():
			if len(self._semantic_graph.predecessors(node) ) == 0:
				# remove the node
				toremove.add(node)
		for semantic_node in toremove:
			self._semantic_graph.remove_node(semantic_node)
			del self._external[semantic_node]

	def receive_update_topology(self,message_source, update_topology_message ,client_socket,answer_type):
		""" handle an incoming topology update: apply it to the local
		    semantic graph, then acknowledge with an empty reply """
		self._receive_update_topology(update_topology_message)
		self._reply_message(None,client_socket,answer_type)
		# TODO: now try to help pending nodes :)
		
					
	def _send_update_topology(self,destination,add,ext,remove):
		""" build a topology-update payload from the three change lists and
		    deliver it, short-circuiting the network for the local node.
		    @param add: [(source,target,relation,relation_sym)] internal links
		    @param ext: [(overlay_node,source,target,relation,relation_sym)] external links
		    @param remove: [(source,target)] links to delete
		"""
		payload = posnet_update_topology()
		for source,target,r,r_sym in add:
			payload.add_link_addition(source,target,r,r_sym)
		for onode,source,target,r,r_sym in ext:
			payload.add_external_link_addition(onode,source,target,r,r_sym)
		for source,target in remove:
			payload.add_link_removal(source,target)
		if destination == self.id():
			self._receive_update_topology(payload)
			return
		self.send_update_topology(destination,payload)

	def _local_add_semantic_node(self,space_point,handle_to_data = None):
		""" add a pair (handle,semantic_node) to this node.
		    The semantic node is created (with a fresh [state,locked] entry in
		    _nodes_to_state) when the space point is not yet known; otherwise
		    the existing vertex is reused.
		    @param space_point: space point of the semantic node
		    @param handle_to_data: optional data handle to register on the node
		    @return: the vertex id of the (possibly new) semantic node
		"""
		# TODO lock ?
		vertex_id = None	
		space_point = self._semantic_graph.space_point_table(space_point)
		try:
			vertex_id = self._semantic_graph.vertex_id(space_point)	
		except:
			# NOTE(review): bare except -- vertex_id raising is taken to mean
			# "unknown space point", so the vertex is created here
			vertex_id = self._semantic_graph.add_node(space_point)
			try:
				self._nodes_to_lock.acquire()
				# state entry = [version counter, locked flag]
				self._nodes_to_state[vertex_id] = [0, False ]
			finally:
				self._nodes_to_lock.release()
		if handle_to_data is not None:
			self._local_add_handle(handle_to_data,vertex_id)
		return vertex_id
	def _local_remove_semantic_node(self,space_point,handle_to_data):
		""" remove a pair (handle,semantic_node) from this node.
		    The handle is dropped first; the semantic node itself (and its
		    lock/state entry) is removed only when no handle remains for it.
		    @param space_point: space point of the semantic node
		    @param handle_to_data: the data handle to forget
		"""
		space_point = self._semantic_graph.space_point_table(space_point)
		semantic_node = self._semantic_graph.vertex_id(space_point)
		#if self._external[semantic_node]: raise Exception("cannot remove external nodes with this function")
		self._local_remove_handle(handle_to_data,semantic_node)
		if not self._nodes_to_handles.has_key(semantic_node):
			# last handle gone -> drop the vertex and its state entry
			self._semantic_graph.remove_node(semantic_node)
			# TODO remove external links
			try:
				self._nodes_to_lock.acquire()
				del self._nodes_to_state[semantic_node]
			finally:
				self._nodes_to_lock.release()
		else: self.logger.error("cannot remove semantic node "+str(space_point)+" with vertex_id "+str(semantic_node))
		
	def _local_add_semantic_edge(self,space_point1,space_point2,relation,relation_sym):
		""" add an edge to this node (relation from 1 to 2 and relation from 2 to 1 are needed, even if they are known to be symetric """
		# TODO lock ?
		graph = self._semantic_graph
		space_point1 = graph.space_point_table(space_point1)
		space_point2 = graph.space_point_table(space_point2)
		endpoint1 = graph.vertex_id(space_point1)
		endpoint2 = graph.vertex_id(space_point2)
		if endpoint1 == endpoint2:
			raise Exception("cannot add a self-loop link")
		# both endpoints are expected to be owned locally; an endpoint that is
		# registered as external is logged but tolerated
		for vid, sp in ((endpoint1, space_point1), (endpoint2, space_point2)):
			if self._external.has_key(vid):
				self.logger.error("node %s should not be external!"%str(sp))
		graph.add_edge(endpoint1,endpoint2,relation,relation_sym)
	
	def _local_add_semantic_edge_external(self,neighbor_overlay_node,space_point1,space_point2,relation,relation_sym):
		""" add an edge whose target semantic node is owned by another overlay node.
		    @param neighbor_overlay_node: overlay id owning space_point2
		    @param space_point1: local endpoint (must already exist here)
		    @param space_point2: remote endpoint; a local shadow vertex is
		                         created for it when not yet known
		    A link-configuration message is finally sent to the owner.
		"""
		# TODO lock ?
		space_point1 = self._semantic_graph.space_point_table(space_point1)
		space_point2 = self._semantic_graph.space_point_table(space_point2)
		node1 = self._semantic_graph.vertex_id(space_point1)
		node2 = None
		try:
			node2 = self._semantic_graph.vertex_id(space_point2)
		except:
			# NOTE(review): bare except -- vertex_id raising is taken to mean
			# "unknown point"; create the shadow vertex with a fresh state entry
			node2 = self._semantic_graph.add_node(space_point2)
			try:
				self._nodes_to_lock.acquire()
				# state entry = [version counter, locked flag]
				self._nodes_to_state[node2] = [0, False ]
			finally:
				self._nodes_to_lock.release()
		self._external[node2] = neighbor_overlay_node
		# last argument False: presumably flags the edge as external -- TODO confirm
		self._semantic_graph.add_edge(node1,node2,relation,relation_sym,False)
		self.configure_link(neighbor_overlay_node,space_point2)
		# TODO if error ?

	def _local_remove_semantic_edge(self,space_point1,space_point2):
		""" remove a semantic edge to this node """
		# TODO lock ?
		space_point1 = self._semantic_graph.space_point_table(space_point1)
		space_point2 = self._semantic_graph.space_point_table(space_point2)
		node1 = self._semantic_graph.vertex_id(space_point1)
		node2 = self._semantic_graph.vertex_id(space_point2)
		#####self.logger.info("**** remove semantic edge"+str(space_point1)+"("+str(node1)+") ->"+str(space_point2)+"("+str(node2)+")")
		self._semantic_graph.remove_edge_unidir(node1,node2)
		if False:
			if self._external.has_key(node2):
				self.logger.info("remove external semantic edge 2")
				#if self._semantic_graph.degree(node2) == 0:
				#	#####self.logger.info("**** remove external because no link more"+str(node2))
				#	self._semantic_graph.remove_node(node2)
				#	del self._external[node2] 
			if self._external.has_key(node1):
				self.logger.info("remove external semantic edge 1")
				#if self._semantic_graph.degree(node1) == 0:
				#	#####self.logger.info("**** remove external because no link more"+str(node1))
				#	del self._external[node1] 
				#	self._semantic_graph.remove_node(node1)
	
#---------------------------------------
# Lock / Unlock (local,send,receive)
#-------------------------------------------
	def _local_lock(self,space_point,expected_state):
		""" lock a semantic node local to this overlay node
			@param space_point: the space point corresponding to the semantic node
			@param expected_state: the expected state (version counter) of the
			       node; must not be older than the node's current state
			@return: True when success (expected and current state matched and
			         the node was unlocked)
			         False when the semantic node was already locked or had
			         been modified since expected_state was read
		"""
		vertex_id = self._semantic_graph.vertex_id(space_point)
		current_state =  None	
		try:
			# a single global mutex (_nodes_to_lock) protects the whole
			# _nodes_to_state table; per-node "locks" are plain booleans
			self._nodes_to_lock.acquire()
			#is external now?
			#if self._external.has_key(vertex_id):
			#	self.logger.error("external to lock")
			#	return False
			#is modified?
			current_state = self._nodes_to_state[vertex_id][0]
			if current_state > expected_state:
				self.logger.error("semantic has changes")
				return False
			#is lock?
			locked_state = self._nodes_to_state[vertex_id][1]
			if locked_state:
				self.logger.error("semantic node already locked")
				return False
			# ok now lock	
			self._nodes_to_state[vertex_id][1] = True
			return True
		finally:
			# released on every path, including the early returns above
			self._nodes_to_lock.release()
	
	def _local_unlock(self,space_point,is_cancel = None):
		""" unlock a semantic node local to this overlay node
			@param space_point: the space point corresponding to the semantic node
			@param is_cancel: when true, this unlock corresponds to a cancel
			                  operation (the state counter is NOT bumped);
			                  defaults to False
			If the semantic node is already unlocked this method does nothing.
			When this is not a cancel, the state counter is incremented before
			unlocking, so peers still holding the old state value will fail to
			re-lock the node.
		"""
		exist = True
		try:
			vertex_id = self._semantic_graph.vertex_id(space_point)
		except:
			# NOTE(review): bare except -- any vertex_id failure is treated
			# as "unknown space point"
			exist = False
		if exist:
			if is_cancel is None: is_cancel = False
			try:
				self._nodes_to_lock.acquire() # NEW
				is_locked = self._nodes_to_state[vertex_id][1]
				if is_locked:
					if not is_cancel:
						# bump the version counter so stale expected_states fail
						self._nodes_to_state[vertex_id][0] =  1 + self._nodes_to_state[vertex_id][0]
					self._nodes_to_state[vertex_id][1] = False
			finally:
				self._nodes_to_lock.release()
		else:
			self.logger.error("vertex does not exist for unlocking %s"%str(space_point))

	def _local_lock_subgraph(self,space_points_and_states):
		""" lock a set of semantic nodes local to this overlay node.
		    Locking is all-or-nothing: on the first failure, every node locked
		    so far is unlocked again as a cancel (states left untouched).
		    @param space_points_and_states: a dictionnary with semantic node's
		           space points (keys) and expected states (values)
		    @return: True if every node could be locked
		             False otherwise (nothing is left locked)
		"""
		locked = sets.Set()
		try:
			
			for space_point,expected_state in space_points_and_states.items():
				ok_for_this_one = self._local_lock(space_point,expected_state)
				if not ok_for_this_one: raise Exception("unable to lock %s from %s"%(str(space_point),str(self.id())))
				locked.add(space_point)
			return True
		except Exception,e:
			self.logger.error("Error while locking: %s"%str(e))
			# roll back: cancel-unlock everything acquired so far
			while len(locked) > 0:
				space_point_to_unlock = locked.pop()
				self._local_unlock(space_point_to_unlock,True)
		return False
	
	def _local_unlock_subgraph(self,space_points,is_cancel = None):
		""" unlock a set of semantic nodes local to this overlay node
			@param space_points: a tab of space points corresponding to the semantic nodes
			@param is_cancel: when true, it means this unlock correspond to a cancel operation
		"""
		if is_cancel is None: is_cancel = False
		for space_point   in space_points:
			self._local_unlock(space_point,is_cancel)

	def receive_lock(self,message_source, message_data,client_socket,answer_type):
		""" handle an incoming lock request: try to lock the requested
		    semantic nodes locally and reply with the (boolean) result """
		points_and_states = message_data.space_points_and_states
		lock_result = self._local_lock_subgraph( points_and_states )
		reply = posnet_lock_reply_message(
			self.id(), message_source,
			posnet_lock_reply(points_and_states, lock_result))
		self._reply_message(reply,client_socket,answer_type)
		return lock_result
	
	def receive_unlock(self,message_source, message_data,client_socket,answer_type):
		""" handle an incoming unlock request: release the listed semantic
		    nodes (possibly as a cancel) and acknowledge with an empty reply """
		self._local_unlock_subgraph(message_data.space_points,message_data.is_cancel)
		self._reply_message(None,client_socket,answer_type)

	def send_lock(self,overlay_node_destination_id,space_points_and_states):
		""" synchronously ask *overlay_node_destination_id* to lock the given
		    semantic nodes; returns the peer's lock reply """
		request = posnet_lock_message(
			self.id(), overlay_node_destination_id,
			posnet_lock(space_points_and_states))
		request.answer_type = 0
		return self.send( request, 0)

	def send_unlock(self,overlay_node_destination_id,space_points_tab,is_cancel):
		""" synchronously ask *overlay_node_destination_id* to unlock the
		    given semantic nodes (as a cancel when *is_cancel* is true);
		    the peer's reply is discarded """
		request = posnet_unlock_message(
			self.id(), overlay_node_destination_id,
			posnet_unlock(space_points_tab,is_cancel))
		request.answer_type = 0
		self.send( request, 0)
		
##	def receive_lock(self,lock_data):
##		#print "lock message received at ",self
##		requester = lock_data.requester
##		owner = lock_data.owner
##		lock = lock_data.lock
##		locked = []
##		for space_point in lock:
##			result = self.lock(space_point)
##			if result: locked.append( space_point)
##		lock_data.lock = locked
##		self.send_lock_confirm(requester,lock_data)
##	def receive_lock_confirm(self,lock_data):
##		#print "\t* lock_confirm message received at ",self
##		for space_point in lock_data.lock:
##			#print "\t* semantic node",space_point,"is locked at node",lock_data.owner
##			pass	
##	def send_lock_confirm(self,overlay_node_destination_id,lock_confirm_message):
##		message = posnet_lock_confirm_message(self.id(),overlay_node_destination_id,lock_confirm_message) 
##		self.send(message)

#------------------------------------------------------------------
# Get data from this node
#------------------------------------------------------------------
#	def graph(self): return self._semantic_graph
	def get_degree_distrib(self):
		return self._semantic_graph.degree_distrib()
	
	def space_points(self):	
		ret = dict()
		for node in  self._semantic_graph.nodes():
			ret[node] = self._semantic_graph.space_point(node)
		return ret

	def get_neighbors(self):
		""" return the deduplicated list of neighbor overlay node ids
		    (owners of the external semantic nodes known here) """
		return list(sets.Set(self._external.values()))

	def get_raw_graph(self):
		""" export the local semantic graph as a plain dictionary:
		    {hashable space point: {neighbor hashable space point: relation}}
		    where relation is one of 'p2p'/'p2c'/'c2p' """
		graph = self._semantic_graph
		hashable = lambda v: graph.hashable_space_point(graph.space_point(v))
		ret = dict()
		for node in graph.nodes():
			neighbor_relations = dict()
			# insertion order matters: a later relation overwrites an earlier
			# one for the same neighbor point (p2p, then p2c, then c2p)
			for rel, neighbors_of in (('p2p',graph.peers),('p2c',graph.children),('c2p',graph.parents)):
				for other in neighbors_of(node):
					neighbor_relations[hashable(other)] = rel
			ret[hashable(node)] = neighbor_relations
		return ret

	def write_dot(self,out,include_others = None):
		""" write this node's semantic graph as a graphviz "subgraph cluster"
		    to the stream *out*.
		    @param out: writable file-like object
		    @param include_others: when true, also draw semantic nodes owned
		           by other overlay nodes (dashed) plus c2p and p2p edges;
		           defaults to False
		    @return: list describing the external edges encountered, as tuples
		             (label, remote space point, owner overlay id, relation,
		              (hashable src point, hashable dst point))
		"""
		if include_others is None: include_others = False
		print >> out,"subgraph cluster_"+self.id_str()+" { "
		print >>out,"style=filled;"
		print >>out,"color=lightgrey;"
		print >>out,"shape=box;"
		#print >>out,"node [style=filled,color=white];"
		external = []
		get_label = self.get_label
		for node in  self._semantic_graph.nodes():
			ext_node = None
			if self._external.has_key(node):
				if not include_others: continue
				ext_node = self._external[node]
				print >>out,	get_label(node),"[label=\""+get_label(node)+" but owns by "+str(ext_node)+"\",style=dashed];"
			elif len(self._semantic_graph.parents(node)) == 0:
				# root nodes (no parent) are highlighted in green
				# BUG FIX: the shape was written "quare", which graphviz does
				# not recognize; the intended shape is "square"
				print >>out,    get_label(node),"[label=\""+self.get_Label(node)+"\",style=filled,color=green,shape=square];"
			else:
				print >>out,    get_label(node),"[label=\""+self.get_Label(node)+"\",style=filled,color=white];"
			links = []	
			for peer in self._semantic_graph.peers(node):
				links.append( ('p2p',peer) )
			for child in self._semantic_graph.children(node):
				links.append( ('p2c',child) )
			if include_others:
				for  parent in self._semantic_graph.parents(node):
					links.append( ('c2p',parent) )
			for link in links:
				rel,onode = link
				oext_node = None
				if self._external.has_key(onode): oext_node = self._external[onode]
				color = ""
				if oext_node is not None or ext_node is not None:
					# edge crosses the overlay-node boundary: dashed red
					color = ",color=red,style=dashed"
					if ext_node is not None: raise Exception("internal error: an edge is starting from an external semantic node...")
					mysrc = self._semantic_graph.hashable_space_point(self._semantic_graph.space_point(node))
					mydst = self._semantic_graph.hashable_space_point(self._semantic_graph.space_point(onode))
					external.append(  (get_label(node),self._semantic_graph.space_point(onode),oext_node,rel,(mysrc,mydst) ) )
					if not include_others: continue
				if rel == "p2c": color+= ",rankdir=TB"
				elif rel== "c2p": color += ",rankdir=BT, constraint=false"
				else:
					color += ",dirType=none,arrowType=none,constraint=false"
					if not include_others: continue
				print >>out,get_label(node),"->",get_label(onode),"[label="+rel+color+"];" 
		print >>out,"label = \"overlay node ",self.id_str(),"\";"
		print >>out ,"}"
		return external
	def vertex_id(self,sp):
		return self._semantic_graph.vertex_id(sp)
	def get_label(self,vertex_id):
		return ""+"".join([str(x).replace("-","").replace(".","_") for x in self._semantic_graph.space_point(vertex_id)])+""
	def get_Label(self,vertex_id):
		return ""+",".join([str(x).replace("-","").replace(".","_") for x in self._semantic_graph.space_point(vertex_id)])+""
		#return str(self.id())+"000"+str(vertex_id)

