#!/usr/bin/python

import sys
import math
import networkx as nx

# Donald Knuth's Running mean and variance calculation
# See Knuth TAOCP vol 2, 3rd edition, page 232
class RunningStat:
	"""Streaming mean/variance accumulator (Welford's online algorithm).

	See Knuth, TAOCP vol 2, 3rd edition, page 232.
	"""

	def __init__(self):
		self.clear()

	def clear(self):
		"""Reset every accumulator; the next push() restarts the stream."""
		self.m_n = 0
		# Bug fix: the original assigned m_newM twice and never
		# initialized m_oldM / m_oldS here.
		self.m_oldM = 0.0
		self.m_newM = 0.0
		self.m_oldS = 0.0
		self.m_newS = 0.0
		self.m_max = 0
		self.m_min = 0

	def push(self, x):
		"""Fold one observation x (coerced to float) into the statistics."""
		x = float(x)
		self.m_n += 1
		if self.m_n == 1:
			# First sample seeds every accumulator.
			self.m_oldM = x
			self.m_newM = x
			self.m_oldS = 0.0
			self.m_max = x
			self.m_min = x
		else:
			self.m_newM = self.m_oldM + (x - self.m_oldM) / self.m_n
			self.m_newS = self.m_oldS + (x - self.m_oldM) * (x - self.m_newM)

			self.m_oldM = self.m_newM
			self.m_oldS = self.m_newS

			if x > self.m_max:
				self.m_max = x
			if x < self.m_min:
				self.m_min = x

	def count(self):
		"""Number of samples pushed since the last clear()."""
		return self.m_n

	def mean(self):
		"""Running arithmetic mean (0.0 before any push)."""
		return self.m_newM

	def var(self):
		"""Sample variance (n-1 denominator); 0.0 for fewer than 2 samples."""
		if self.m_n > 1:
			return self.m_newS / (self.m_n - 1)
		return 0.0

	def std(self):
		"""Sample standard deviation."""
		return math.sqrt(self.var())

	def min(self):
		"""Smallest sample seen (0 before any push)."""
		return self.m_min

	def max(self):
		"""Largest sample seen (0 before any push)."""
		return self.m_max

# creates a dictionary to map node names to the cluster they belong to.
# for now assumes one cluster per line in input file:
# 	clustername: vertex1 vertex2 vertex3 ...
class ClusterMap:
	"""Dictionary-like map from vertex name to the cluster containing it.

	Built from a file with one cluster per line:
		clustername: vertex1 vertex2 vertex3 ...
	Lines whose first non-blank character is '#' are skipped.
	Per-cluster sizes are accumulated in self.sizeStats.
	"""

	def __init__(self, f):
		self.clustermap = dict()
		self.sizeStats = RunningStat()
		if f is None:
			return
		for line in f.readlines():
			if line.strip().startswith('#'):
				continue  # comment line
			tokens = line.replace(':', ' ').split()
			if not tokens:
				continue  # blank line
			name, members = tokens[0], tokens[1:]
			self.sizeStats.push(len(members))
			for vertex in members:
				self.clustermap[vertex] = name

	def __getitem__(self, x):
		# Unknown vertices map to the empty cluster name.
		return self.clustermap.get(x, "")

	def __setitem__(self, x, y):
		self.clustermap[x] = y

	def __str__(self):
		return str(self.clustermap)

# Don't need to build graph.  Just perform counts.
def _lookup_cluster(clustermap, vertex):
	"""Cluster name for vertex, or "" when the map has no entry.

	Works for both ClusterMap (which already returns "" itself) and a
	plain dict, which raises KeyError for missing vertices.
	"""
	try:
		return clustermap[vertex]
	except KeyError:
		return ""

def analyzeGraph(f, clustermap):
	"""Read an edge list from file object f and print cluster statistics.

	Each non-comment line is expected to hold an edge "u v ..."; only the
	first two tokens are used.  Lines starting with '#' and lines with
	fewer than two tokens are skipped.  clustermap maps vertex name ->
	cluster name; unmapped vertices fall into the "" cluster.  Prints
	totals plus intra-/inter-cluster edge-count statistics; returns None.
	"""
	clusters = []        # cluster names in first-seen order
	seen = set()         # O(1) membership test (was O(n) list.count per edge)
	nodes = set()
	edgecount = dict()   # (cluster_u, cluster_v) -> directed edge count
	for line in f:
		if line.strip().startswith('#'): # ignore comments
			continue
		s = line.split()
		if len(s) < 2:
			# Fixed: the original indexed s[1] after only checking
			# len(s) > 0, crashing on one-token lines.
			continue
		u, v = s[0], s[1]
		cluster_u = _lookup_cluster(clustermap, u)
		cluster_v = _lookup_cluster(clustermap, v)
		nodes.add(u)
		nodes.add(v)
		for c in (cluster_u, cluster_v):
			if c not in seen:
				seen.add(c)
				clusters.append(c)
		key = (cluster_u, cluster_v)
		edgecount[key] = edgecount.get(key, 0) + 1

	intra = RunningStat()
	inter = RunningStat()
	for i, c1 in enumerate(clusters):
		# Self-edges stay within cluster c1.
		intra.push(edgecount.get((c1, c1), 0))

		for c2 in clusters[i + 1:]:
			# undirected analysis: sum both directions
			inter.push(edgecount.get((c1, c2), 0) + edgecount.get((c2, c1), 0))

	num_nodes = len(nodes)
	num_edges = sum(edgecount.values())

	# Print total statistics
	print( "Total Graph Stats:" )
	print( "  Num Nodes: " + str(num_nodes))
	print( "  Num Edges: " + str(num_edges))

	print( "Cluster Stats:" )
	print( "  Num Clusters: " + str(len(clusters)))
	print( "  Intra Mean  : " + str( intra.mean() ))
	print( "  Intra Std   : " + str( intra.std()  ))
	print( "  Intra Min   : " + str( intra.min()  ))
	print( "  Intra Max   : " + str( intra.max()  ))
	print( " " )
	print( "  Inter Mean  : " + str( inter.mean() ))
	print( "  Inter Std   : " + str( inter.std()  ))
	print( "  Inter Min   : " + str( inter.min()  ))
	print( "  Inter Max   : " + str( inter.max()  ))
	print( "Summary: " + str(num_nodes) + "," + str(num_edges) + "," + str(len(clusters)) + "," 
										 + str( intra.mean() ) + "," + str( intra.std()  ) + "," + str( intra.min()  ) + "," + str( intra.max()  ) + ","
										 + str( inter.mean() ) + "," + str( inter.std()  ) + "," + str( inter.min()  ) + "," + str( inter.max()  ))

	print( "ClusterSizeStats: " + str(clustermap.sizeStats.count()) + "," +str(clustermap.sizeStats.mean()) + "," + str(clustermap.sizeStats.std()) )

	return 

def main():
	"""Entry point: parse argv, load the optional cluster map, and run
	the edge-list analysis on sys.argv[1], printing to stdout.
	"""
	if len(sys.argv) not in (2, 3):
		# NOTE(review): the usage text names "dlm2dot.py"; it looks
		# copied from a sibling script -- confirm the program name.
		print ("usage:\n" +
			"\tdlm2dot.py input_file [cluster_file] > output_file\n" +
			"\t\tall arguments are filenames\n")
		sys.exit()	

	# The optional second argument is the cluster file.  Fall back to an
	# empty map when it is absent (IndexError) or unreadable (IOError);
	# the original bare `except:` hid unrelated errors, and its file()
	# handle was never closed.
	try:
		with open(sys.argv[2], 'r') as clusterfile:
			clustermap = ClusterMap(clusterfile)
	except (IndexError, IOError):
		clustermap = ClusterMap(None)

	# open() replaces the Python-2-only file() builtin; the with-block
	# guarantees the handle is closed even if the analysis raises.
	with open(sys.argv[1], 'r') as inputfile:
		# process edge list
		analyzeGraph(inputfile, clustermap)

if __name__ == '__main__':
	main()
