# This file is part of Sonedyan.
#
# Sonedyan is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation;
# either version 3 of the License, or (at your option) any
# later version.
#
# Sonedyan is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, see
# <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2009-2012 Jimmy Dubuisson <jimmy.dubuisson@gmail.com>

library(igraph)
library(hash)
library(entropy)
library(logging)

##########
## generic functions
##########

# stack implementation: returns an environment holding a backing vector
# (.data) plus push/pop closures; pop on an empty stack yields a
# zero-length vector, the same as indexing past the end of a vector
cstack <- function()
{
	self <- new.env()
	self$.data <- vector()
	# append x at the top of the stack
	self$push <- function(x)
	{
		assign(".data", c(get(".data", envir = self), x), envir = self)
	}
	# remove and return the most recently pushed element
	self$pop <- function()
	{
		items <- get(".data", envir = self)
		last <- items[length(items)]
		assign(".data", items[-length(items)], envir = self)
		last
	}
	class(self) <- "stack"
	self
}

# TRUE iff x is a zero-length integer vector (e.g. the result of a
# which() call that matched nothing)
is.integer0 <- function(x)
{
	length(x) == 0L && is.integer(x)
}

# get the vertex indexes of the main (largest) connected component
# NB: mode can be 'weak' or 'strong'
get.main.cc.vertex.indexes <- function(g, mode = "strong")
{
  comps <- clusters(g, mode)
  # which.max() returns the first component of maximal size, matching the
  # which(csize == max(csize))[1] tie-breaking of the original code
  # (memberships are 1-based in current igraph, no off-by-one correction)
  mainId <- which.max(comps$csize)
  which(comps$membership == mainId)
}

# get the size (vertex count) of the main connected component
# NB: mode can be 'weak' or 'strong'
get.main.cc.size <- function(g, mode = "strong")
{
  # '<-' for assignment, consistent with the rest of the file
  ccs <- clusters(g, mode)
  max(ccs$csize)
}

# get the induced subgraph of 'core' spanned by the vertices whose
# 'id' vertex attribute appears in 'vnames'
get.subgraph <- function(core, vnames)
{
	# '<-' for assignment, consistent with the rest of the file
	vindexes <- V(core)[V(core)$id %in% vnames]
	induced.subgraph(core, vindexes)
}

# get the main (largest) connected component of the specified graph
# NB: mode can be 'weak' or 'strong'
get.main.cc <- function(g, mode = "strong")
{
	induced.subgraph(g, get.main.cc.vertex.indexes(g, mode))
}

# get the Shannon entropy (natural log) of the specified probability
# vector or joint probability matrix, with the convention 0 * log(0) = 0
get.entropy <- function(sm)
{
	# vectorized replacement of the original element-by-element loop:
	# sum -p * log(p) over the non-zero entries only; an empty or
	# all-zero input yields 0, exactly like the loop did
	p <- sm[sm != 0]
	-sum(p * log(p))
}

# get the Shannon entropy of the specified vector or joint probability matrix
# NB: delegates to entropy() from the 'entropy' package (loaded at the top of
# this file) — presumably its default (maximum-likelihood) estimator, which
# may normalize counts itself, unlike get.entropy() — TODO confirm
get.entropy.2 <- function(sm)
{
	entropy(sm)
}

# get transition matrix from row-normalized adjacency matrix
#
# NB: the graph is supposed to be a SCC (so no row sum is zero)
#
# @return row stochastic matrix
get.transition.matrix <- function(adj)
{
	# divide every row by its own sum
	sweep(adj, 1, rowSums(adj), "/")
}

# compute the Rayleigh quotient (Ax . x) / (x . x), an estimate of the
# eigenvalue associated with the (approximate) eigenvector pev
# @return a 1x1 matrix, as the original t(.) %*% . formulation produced
get.rayleigh.quotient <- function(adj, pev)
{
	crossprod(adj %*% pev, pev) / crossprod(pev)
}

# get transition matrix from the principal eigenvector normalized adjacency
# matrix: mat[i, j] = adj[i, j] * pev[j] / (lambda * pev[i]), where lambda
# is the principal eigenvalue estimated by the Rayleigh quotient
#
# NB: assumes no zero entry in pev (the graph is supposed to be a SCC)
get.pev.transition.matrix.2 <- function(adj, pev)
{
	# principal eigenvalue; drop the 1x1 matrix down to a plain scalar
	lambda <- as.numeric(get.rayleigh.quotient(adj, pev))

	#loginfo(paste("Adj. matrix princ. eigenvalue: ", lambda))

	# vectorized replacement of the original O(n^2) double loop:
	# outer(1 / (lambda * pev), pev)[i, j] == pev[j] / (lambda * pev[i])
	adj * outer(1 / (lambda * pev), pev)
}

# get transition matrix from the principal eigenvector normalized adjacency
# matrix, computing the principal eigenvector of 'adj' first
get.pev.transition.matrix <- function(adj)
{
	get.pev.transition.matrix.2(adj, get.principal.eigenvector(adj))
}

# compute the Euclidean distance between 2 vectors
# @return a length-1 'dist' object (as stats::dist produces)
euclidean.distance <- function(x1, x2)
{
	dist(rbind(x1, x2), method = "euclidean")
}

# compute the stationary distribution of the chain described by the
# transition matrix 'trans': the principal eigenvector rescaled so that
# its entries sum to 1
get.stationary.distribution <- function(trans, maxiter = 100, epsilon = 1e-8)
{
	pev <- get.principal.eigenvector(trans, maxiter, epsilon)
	pev / sum(pev)
}

# normalize a vector to unit Euclidean length
#
# Bug fix: the previous body divided v by sqrt(t(v) %*% v), a 1x1 MATRIX;
# R rejects vector / length-1-array arithmetic for length(v) > 1 with
# "dims [product 1] do not match the length of object [n]". Using
# sum(v * v) produces a plain scalar, so the division always works.
normalize.vector <- function(v)
{
	v / sqrt(sum(v * v))
}

# compute the principal eigenvector of 'mat' by the power method
#
# starts from the uniform vector and iterates x <- normalize(mat %*% x)
# until the change between iterates drops below epsilon or maxiter
# iterations have run
get.principal.eigenvector <- function(mat, maxiter = 100, epsilon = 1e-8)
{
	n <- ncol(mat)
	vec <- rep(1 / n, n)
	iter <- 0
	# seed delta above epsilon so the loop body runs at least once
	delta <- 2 * epsilon

	#loginfo(paste("Matrix size: ", n))

	while (iter < maxiter && delta > epsilon)
	{
		# one power iteration, renormalized to unit length
		nxt <- normalize.vector(as.vector(mat %*% vec))
		# convergence is measured as the distance from the previous iterate
		delta <- euclidean.distance(vec, nxt)
		vec <- nxt
		iter <- iter + 1
		#loginfo(paste("Current epsilon: ", delta))
	}

	vec
}

# get the entropy rate of the stochastic process with transition matrix
# 'trans' and stationary distribution 'stat':
#   H = -sum_i stat[i] * sum_j trans[i, j] * log(trans[i, j])
# with the convention 0 * log(0) = 0
get.entropy.rate <- function(trans, stat)
{
	# vectorized replacement of the original double loop
	contrib <- trans * log(trans)
	# zero probabilities contribute nothing (log(0) yields NaN here)
	contrib[trans == 0] <- 0
	-sum(stat * rowSums(contrib))
}

# vertex random attack: remove nv vertices chosen uniformly at random
# and return the resulting graph
vertex.random.attack <- function(graph, nv)
{
	# random sample of vertices
	victims <- sample(V(graph), nv)
	delete.vertices(graph, victims)
}

# edge random attack: remove ne edges chosen uniformly at random
# and return the resulting graph
edge.random.attack <- function(graph, ne)
{
	# random sample of edges
	victims <- sample(E(graph), ne)
	delete.edges(graph, victims)
}
