# NOTE(review): rm(list = ls()) and setwd() inside a script are discouraged in
# general, but kept here because the whole pipeline relies on the working
# directory and a clean workspace (save.image at the end).
rm (list = ls (all = TRUE))

#-------------------------------------------------------------------------------
#Defining the main directory
#-------------------------------------------------------------------------------
#Directory="N:/Personal/Projects/Informatica/ODE-Lt/"
#Directory="C:/Users/Jose Miguel/Dropbox/SD-Informatika/ODE-Lt/"
Directory="D:/DocumentsJM/Trabajos/Journals/Working papers/Variational Bayes/vb-stability/ODE-Lt/"

#-------------------------------------------------------------------------------
#Reading data
#------------------------------------------------------------------------------- 
setwd (Directory)
# Shelf-life measurements; the code below uses columns Temp, RH, STS, L and T
# (and optionally Sample for the commented-out filters) -- TODO confirm the
# full schema against Data2RShelfLife.txt.
df.sl <- read.csv ("Data2RShelfLife.txt", header = TRUE)
df.sl <- data.frame (df.sl)
#df.sl <- df.sl [grepl ("DGS", df.sl$Sample) == FALSE, ]
#df.sl <- df.sl [grepl ("FBS", df.sl$Sample) == FALSE, ]
# Keep only one storage condition: 22 (Temp) and 5 (RH).
df.sl <- df.sl [df.sl$Temp == 22, ]
df.sl <- df.sl [df.sl$RH == 5, ]
# Initial G* derived from STS; the 55.4/24 conversion factor is undocumented
# here -- presumably a unit conversion. TODO confirm.
df.sl$G0star <- 55.4 / 24 * df.sl$STS
df.sl$L <- df.sl$L  # no-op assignment; kept as-is (has no effect)
df.sl$time <- df.sl$T  # copy measurement time under a safer name than T

#-------------------------------------------------------------------------------
# Locate per-subject measurement series.  A time of 0 marks the start of a new
# series; a zero row immediately followed by another zero row (an empty
# series), or a trailing zero row with no observations after it, is flagged
# with -1 and dropped.
#-------------------------------------------------------------------------------
is_zero <- df.sl$time == 0
# Redundant when the next row is also a series start, or when it is the very
# last row (nothing follows it).
redundant <- is_zero & c (is_zero [-1], TRUE)
df.sl$time [redundant] <- -1
df.sl <- df.sl [df.sl$time >= 0, ]
N <- length (df.sl$time)
# Each remaining time-zero row opens one series; record where each series
# starts and ends (a series runs up to the row before the next start).
start <- which (df.sl$time == 0)
NN <- length (start)
end <- c (start [-1] - 1, N)

#-------------------------------------------------------------------------------
# Per-subject bookkeeping: label every row with its subject index and collect
# the subject-level quantities (L and G0star at the time-zero row).
#-------------------------------------------------------------------------------
# FIX: the original grew `patients` with rbind() inside the loop and built
# time2gams/data2gams by cbind-ing onto a hard-coded 3-row placeholder column
# (silently assuming exactly 3 observations per subject, then dropping the
# placeholder).  Both matrices are now assembled in one step.
for (i in seq_len (NN)) {
	df.sl$patient [start [i] : end [i]] <- i
}
# One row per subject: (L at t = 0, G0star at t = 0).
patients <- t (vapply (seq_len (NN),
	function (i) c (df.sl$L [start [i]], df.sl$G0star [start [i]]),
	numeric (2)))

# Initial ODE states per subject: C1 = 100 - L - G0star, C2 = L.
initial_conditions <- cbind ((100 - patients [, 1] - patients [, 2]),
	patients [, 1])

# Observation times / responses, excluding the time-zero row, one column per
# subject (all subjects must have the same number of observations).
collect_obs <- function (values) {
	do.call (cbind, lapply (seq_len (nrow (patients)), function (i) {
		keep <- (df.sl$patient == i) & (df.sl$time != 0)
		values [keep]
	}))
}
time2gams <- collect_obs (df.sl$time)
data2gams <- collect_obs (df.sl$L)
dataX <- time2gams
dataY <- data2gams


library (mvtnorm)
library (limSolve)
library (MCMCpack)
# Plain 1..n column names so GAMS can index subjects / observations.
colnames (initial_conditions) <- seq (1, ncol (initial_conditions))
colnames (data2gams) <- seq (1, ncol (data2gams))
colnames (time2gams) <- seq (1, ncol (time2gams))
#%%%%%%%%%%%%%%%%%%%%%%%%
#Phase I
#%%%%%%%%%%%%%%%%%%%%%%%%
#setting directory for Phase I
VB_directory <- paste (Directory, "v1/", sep = "")
setwd (VB_directory) 
# Export the data the external GAMS optimizer reads.
write.csv (data2gams, file = "data_GAMS.csv")
write.csv (time2gams, file = "time_GAMS.csv")
write.csv (initial_conditions, file = "initial_conditions.csv")
phase1 <- "PhaseI.gms" 
# Command line: pass the number of observations (--v) and of subjects (--p).
gams_call_p1 <- paste ('gams', phase1, "--v", nrow (data2gams), "--p", nrow (patients)) 
phase1_file <- "phase1.csv"
# Remove any stale output so a failed GAMS run is detected below.
if (file.exists (phase1_file)) file.remove (phase1_file)
system.time (
	system (gams_call_p1, wait = TRUE, show.output.on.console = TRUE,
		invisible = FALSE)
)
if (!file.exists (phase1_file)) {
	stop ("Phase I error") 
}
# One-row results file: CPU time, posterior-mode coordinates, max log-posterior.
output1 <- read.csv (phase1_file, header = FALSE)
#saving max log-post. value (last field)
max_joint <- as.numeric (output1 [length (output1)]) 
#saving computational time for Phase I (first field)
CPU_sec1 <- as.numeric (output1 [1])
#saving means (everything in between)
mean_posterior <- as.numeric (output1 [2 : (length (output1) - 1)])
#%%%%%%%%%%%%%%%%%%%
#Pre-processing
#%%%%%%%%%%%%%%%%%%% 
library(deSolve)
# Right-hand side of the stability ODE system, in the form deSolve::ode expects.
#   state      = (C1, C2, C3)
#   parameters = log-rate constants; k1 = exp(p[1]), k2 = exp(p[2])
# Dynamics: C1 is consumed at rate k1*C1*C3, C3 is produced from that reaction
# and decays into C2 at rate k2*C3.
Stability <- function (t, state, parameters) {
  k1 <- exp (parameters [1])
  k2 <- exp (parameters [2])
  reaction <- k1 * state [1] * state [3]
  decay <- k2 * state [3]
  list (c (-reaction, decay, reaction - decay))
}
#----------------------
#Likelihood
#----------------------
likelihood <- function (x, ini, dataX, dataY1, U = NULL, center = NULL, 
	pred = FALSE, uniforming = 0, raw_val = TRUE)
{
	# Log-likelihood of the stability ODE model under log-normal residuals.
	#   x          parameter vector in the (possibly rotated/centered) search
	#              space; the LAST entry is the log residual standard deviation.
	#   ini        matrix of initial conditions, one row per subject.
	#   dataX      matrix of observation times, one column per subject.
	#   dataY1     matrix of observed responses, one column per subject.
	#   U          optional rotation matrix; x is mapped back to raw
	#              coordinates by solving U %*% xt = x (limSolve::Solve).
	#   center     optional centering vector added after the rotation.
	#   pred       when TRUE, also return the per-subject predictions.
	#   uniforming constant shift added to the returned log-likelihood.
	#   raw_val    when TRUE return the back-transformed parameters xt,
	#              otherwise the original x.
	# Returns c(params, uniforming + loglik) when pred = FALSE, otherwise
	# list(loglik, predictions, params, residual sd).
	#
	# Map x from the search space back to raw parameter values.
	if (is.null (U)) {
		xt <- if (is.null (center)) x else x + center
	} else {
		xt <- Solve (U, x)
		if (!is.null (center)) {
			xt <- xt + center
		}
	}
	n <- ncol (dataX)
	prob_C2 <- 0
	# BUGFIX: y1_pred was never assigned in the original, so every call with
	# pred = TRUE failed with "object 'y1_pred' not found".  Collect the
	# per-subject predicted C2 trajectories here instead.
	y1_pred <- vector ("list", n)
	for (i in 1 : n) {
		# Third state completes the mass balance to 100.
		y <- c (ini [i, ], 100 - sum (ini [i, ]))
		times <- c (0, dataX [, i])
		pred_out <- ode (y, times, func = Stability, parms = xt)
		# ode output: column 1 = time, columns 2.. = states; drop the t = 0 row.
		C2_pred <- pred_out [-1, 3]
		y1_pred [[i]] <- C2_pred
		# Log-normal residuals with sd = exp(last parameter).
		residual <- log (dataY1 [, i]) - log (C2_pred)
		prob_C2 <- prob_C2 + sum (dnorm (as.matrix (residual), mean = 0,
			sd = exp (xt [length (x)]), log = TRUE))
	}
	params_out <- if (raw_val == TRUE) xt else x
	if (pred == FALSE) {
		c (params_out, (uniforming + prob_C2))
	} else {
		list (prob_C2, y1_pred, params_out, exp (xt [length (x)]))
	}
}
#-------------------------
#Prior distribution
#-------------------------
prior <- function (x, m, variance)
{
	# Flat prior on the log scale: contributes 0 to the log-posterior whether
	# or not the ordering constraint x[3] > x[1] holds.  The informative
	# multivariate-normal prior is kept below, commented out, so it can be
	# re-activated without changing callers (m and variance are its arguments).
	constraint_holds <- x [3] > x [1]
	if (constraint_holds) {
		# dmvnorm (x, m, variance, log = TRUE)
		return (0)
	}
	0
}
#----------------------
#Posterior
#----------------------
joint <- function (x, ini, dataX, dataY1, U = NULL, center = NULL, uniforming = 0,
	raw_val = TRUE, logf = TRUE,  ...)
{
	# Unnormalized posterior: prior combined with the ODE likelihood.
	# Arguments in ... are forwarded to prior(); all others go to likelihood().
	# With logf = TRUE the two parts are added (log scale), otherwise
	# multiplied.  Note likelihood() returns c(params, value), so the prior is
	# applied elementwise to that vector (it is 0/flat, so only the last entry
	# effectively matters).
	prior_part <- prior (x, ...)
	lik_part <- likelihood (x, ini, dataX, dataY1, U, center, 
		pred = FALSE, uniforming, raw_val)
	if (logf) prior_part + lik_part else prior_part * lik_part
}

#---------------------------------
#Obtaining q-points from range
#---------------------------------
# Map Gauss-Legendre quadrature nodes from the canonical interval [-1, 1]
# onto [var_range[1], var_range[2]] via the usual affine change of variables.
legendre_conversion <- function (var_range, q_points)
{
	midpoint <- (var_range [1] + var_range [2]) / 2
	half_width <- (var_range [2] - var_range [1]) / 2
	midpoint + half_width * q_points
}

#-----------------
#Range definition
#-----------------
range_definition <- function (eval_var, var_range, means, q_points, ini, dataX, 
	dataY, threshold, U = NULL, center = NULL, raw_val = TRUE)
{
	# Narrow the search range of ONE variable (eval_var) by evaluating the
	# joint log-posterior at quadrature points along that axis (all other
	# variables held at `means`) and keeping the points above `threshold`.
	# Returns a 2x2 matrix:
	#   row 1: (last point below the lowest "good" point,  lowest "good" point)
	#   row 2: (highest "good" point, first point above it)
	# so callers can iterate to refine each boundary.
	#
	# Evaluation points for this variable, from the quadrature nodes.
	eval_points_var <- legendre_conversion (var_range, q_points)
	# One evaluation row per quadrature point, all columns set to the means...
	# (FIX: the original used `data <- rep(...)` inside matrix(), an accidental
	# assignment-as-argument; `data =` is the intended named argument.)
	eval_points <- matrix (data = rep (means, length (q_points)), 
		ncol = length (means), nrow = length (q_points), byrow = TRUE) 
	# ...then substitute the values of the variable under evaluation.
	eval_points [, eval_var] <- eval_points_var 
	# joint() returns c(params, value); keep only the value (last entry).
	results <- apply (eval_points, MARGIN = 1, 
		function (x)
		{
			joint (x, ini, dataX, dataY, U, center, uniforming = 0,
				raw_val, logf = TRUE)
		}) [(length (means) + 1), ]
	significant <- (results >= threshold) #comparing with threshold 
	# FIX: previously an empty `significant` set fell through to min/max on an
	# empty vector (Inf with a warning, then a cryptic subscript error).
	if (!any (significant)) {
		stop ("range_definition: no evaluation point reaches the threshold ",
			"for variable ", eval_var, call. = FALSE)
	}
	#"good" points position
	positions <- seq (length (eval_points_var)) [significant] 
	min_value <- min (positions) #min "good" value position
	max_value <- max (positions) #max "good" value position
	# Step one point outward on each side, unless already at the given limit.
	min_a <- if (min_value != 1) min_value - 1 else min_value
	max_b <- if (max_value != length (q_points)) max_value + 1 else max_value
	matrix (data = c (eval_points_var [min_a], eval_points_var [min_value], 
		eval_points_var [max_value], eval_points_var [max_b]), ncol = 2, 
		nrow = 2, byrow = TRUE) 
}
setwd(Directory) 
#Range for initial search (lower difference, upper diff.)
# NOTE(review): this var_range is dead code -- it is overwritten at the top of
# the refinement loop below (with half-width 300 instead of 60).
var_range <- matrix (data = rep (60, (2 * length (mean_posterior))), 
	ncol = 2, 
	nrow = length (mean_posterior), byrow = TRUE)
#reading Legendre-Quadrature points and weights
legendre <- read.csv ("legendre05.csv", header = TRUE) #5 points
tmp <- proc.time ()  # start timing the range search / Phase II preparation
threshold <- -25 + max_joint #defining relative threshold
threshold2 <- -10  # absolute cutoff used later when pruning evaluation points
U_matrix=diag(rep(1,length(mean_posterior)))  # start with no rotation (identity)
max_it=2  # number of range-refinement iterations

#--------------------------------
#Obtaining range in raw values
#--------------------------------
# Two-pass refinement: pass 1 finds per-variable ranges in the original axes,
# then rotates the axes (U_matrix from a weighted spectral decomposition of
# the evaluated points); pass 2 repeats the range search in the rotated space.
for(j in 1:max_it){
  # Fresh symmetric search window (+/-300 around zero) for every variable.
  var_range <- matrix (data = rep (300, (2 * length (mean_posterior))), 
  	ncol = 2, 
  	nrow = length (mean_posterior), byrow = TRUE)
  var_range [, 1] <- -var_range [, 1] #range center in zero
  var_range [, 2] <- +var_range [, 2] #range center in zero
  for (i in 1 : length (mean_posterior)) {
  	# First pass-range_definition 
  	firstPass <- range_definition (i, var_range [i, ], 
  		rep (0, length (mean_posterior)), legendre [, 1], initial_conditions, dataX, dataY, 
  		threshold, U = U_matrix, center = mean_posterior, 
  		raw_val = FALSE)
  	#Min redefinition
  	#creating a modified legendre grid whose endpoints sit exactly at the
  	#interval limits, so the shrinking brackets always include them
  	modified_legendre <- legendre 
  	modified_legendre [1, 1] <- -1 #changing first value
  	modified_legendre [nrow (legendre), 1] <- 1 #changing last value
  	min_a <- firstPass [1, 1]
  	min_b <- firstPass [1, 2]
  	# Bisection-style refinement of the lower boundary until the bracket
  	# (bad value, good value) is narrower than 1e-4.
  	while (min_a <= (min_b - 1e-4)) {
  		min_redef <- range_definition (i, c (min_a, min_b), 
  			rep (0, length (mean_posterior)),
  			modified_legendre [, 1], initial_conditions, dataX, dataY, threshold, 
  			U = U_matrix, 
  			center = mean_posterior, raw_val = FALSE)
  		min_a <- min_redef [1, 1]# first bad value
  		min_b <- min_redef [1, 2]# minimum "good" value
  	}
  	#Max redefinition (mirror image of the min refinement above)
  	max_a <- firstPass [2, 1]
  	max_b <- firstPass [2, 2]
  	while (max_a <= (max_b - 1e-4)) {
  		max_redef <- range_definition (i, c (max_a, max_b),
  			rep (0, length (mean_posterior)),
  			modified_legendre [, 1], initial_conditions, dataX, dataY, threshold, 
  			U = U_matrix, 
  			center = mean_posterior, raw_val = FALSE)
  		max_a <- max_redef [2, 1]# maximum "good" value
  		max_b <- max_redef [2, 2]# first bad value
  	}
  	var_range [i, ] <- c (min_a, max_b)                                                    
  }
  if(j!=max_it){
    #----------------------------------------------
    #Evaluating the joint distribution in raw range
    #----------------------------------------------
    # Shift so the maximum log-posterior becomes 10 (keeps exp() well-scaled
    # for the weighted covariance below).
    uniforming <- 10 - max_joint
    eval_points <- apply (var_range, MARGIN = 1,
    	function (x)
    	{
    		#convert to legendre points
    		legendre_conversion (x, legendre [, 1])
    	}) 
    #converting to list
    eval_points <- as.list (as.data.frame (eval_points)) 
    #combinations to be evaluated (full tensor grid over all variables)
    eval_points <- expand.grid (eval_points) 
    # Each column of the result is c(params, log-posterior) for one grid point.
    eval_points <- apply (eval_points, MARGIN = 1, 
    	function (x)
    	{
    		joint (x, initial_conditions, dataX, dataY, U = U_matrix, center = mean_posterior,
    			uniforming = uniforming, raw_val = TRUE, logf = TRUE)}) 
    #-----------------------------------------
    #Obtaining the orthogonal matrix - for SLE
    #-----------------------------------------
    #Spectral decomposition of the posterior-weighted covariance of the grid
    #points (cov.wt(...)[[1]] is the covariance matrix; weights are the
    #exponentiated, shifted log-posterior values)
    U_matrix <- eigen (cov.wt (
    	t (eval_points [-(length (mean_posterior) + 1), ]),
    	(exp (eval_points [(length (mean_posterior) + 1), ])), 
    	center = mean_posterior) [[1]]) 
    #orthogonal matrix to rotate axes/ eigen-vectors in col
    U_matrix <- U_matrix [[2]] 
    U_matrix <- t (U_matrix)
  }
}

#---------------------------------------------------
#Evaluating the joint distribution Trapezoidal rule
#---------------------------------------------------
n_points <- 9  # number of sub-intervals per axis (n_points + 1 grid points)
# Shift so the maximum log-posterior becomes 25 before evaluation.
uniforming <- 25 - max_joint
# Equally spaced grid over each variable's final range (one column per var).
eval_points <- apply (var_range, MARGIN = 1,
	function(x) 
	{
		(seq (n_points + 1) -1 ) * ((x [2] - x [1]) / n_points) + x [1]
	}) 
#evaluation points
eval_points <- as.list (as.data.frame (eval_points)) #converting to list
eval_points <- expand.grid (eval_points) #combinations to be evaluated
# Each column of the result is c(params, log-posterior) for one grid point.
eval_points <- apply (eval_points, MARGIN = 1, 
	function (x)
	{
		joint (x, initial_conditions, dataX, dataY, U = U_matrix, center = mean_posterior,
			uniforming = uniforming, raw_val = TRUE, logf = TRUE)})
# Trapezoidal-rule weights per axis: 1 inside, 0.5 at the two endpoints.
weight_points <- rep (1, (n_points + 1))
weight_points [1] <- 0.5
weight_points [length (weight_points)] <- 0.5
# Replicate the 1-D weights for every variable (one column per variable).
weight_points <- apply (as.data.frame (seq (length (mean_posterior))), 1,
	FUN = function (x){weight_points})
#converting to list
weight_points <- as.list (as.data.frame (weight_points)) 
weight_points <- expand.grid (weight_points) #combinations to be evaluated
# Multi-dimensional weight of each grid point = product of per-axis weights.
weight_points <- apply (weight_points, 1, prod)
#-----------------------------------
#Preparing data to be sent to GAMS
#-----------------------------------  
#eliminating those points whose joint probability is less than threshold
#(the last row of eval_points holds the shifted log-posterior values)
position <- seq (ncol (eval_points)) [
	eval_points [nrow (eval_points), ] >= threshold2]
eval_points <- eval_points [, position]
weight_points <- weight_points [position]
# Append the trapezoidal weights as an extra row for GAMS.
eval_points <- rbind (eval_points, weight_points)
rownames (eval_points) <- seq (nrow (eval_points))
colnames (eval_points) <- seq (ncol (eval_points))
# Row names "j1", "j2", ... match the index set expected by the GAMS model.
mean_posterior <- as.data.frame (mean_posterior)
row.names (mean_posterior) <- apply (as.data.frame (
	seq (nrow (mean_posterior))), 1, 
	FUN = function (x) {paste ("j", x, sep = "")})
colnames (mean_posterior) <- seq (ncol (mean_posterior))
colnames (U_matrix) <- seq (ncol (U_matrix))
proc.time () - tmp  # elapsed time since the range search started
#%%%%%%%%%%%%%%%%%%%%
#Solving Phase II
#%%%%%%%%%%%%%%%%%%%% 
#setting directory for Phase II
setwd (paste (Directory, "v2/", sep = "")) 
#saving file w/significant points (transposed: one row per evaluation point)
write.csv (t (eval_points), file = "eval_points.csv") 
write.csv (U_matrix, file = "auxU_matrix.csv") #saving rotation matrix
#saving file with Phase-I means
write.csv (mean_posterior, file = "meanPhase1.csv")  
phase2 <- "PhaseII.gms" # GAMS file
#Command line to execute GAMS
gams_call_p2 <- paste ('gams', phase2, "--v", ncol (eval_points)) 
phase2_mean_file <- "phase2_mean.csv"
phase2_cov_file <- "phase2_cov.csv"
phase2_U_file <- "phase2_U.csv"
# Remove stale outputs so a failed GAMS run is detected below.
if (file.exists (phase2_mean_file)) file.remove (phase2_mean_file)
if (file.exists (phase2_cov_file)) file.remove (phase2_cov_file)
if (file.exists (phase2_U_file)) file.remove (phase2_U_file)
system.time (
	system (gams_call_p2, wait = TRUE, invisible = FALSE)
) #executing optimization
if (!file.exists (phase2_mean_file)) stop ("Phase II error")
mean_posterior2 <- read.csv (phase2_mean_file, header = FALSE) 
covariance <- read.csv (phase2_cov_file, header = FALSE) 
#saving CPU time for this stage (first field of the one-row results file)
CPU_sec2 <- as.numeric (mean_posterior2 [1]) 
mean_posterior2 <- as.numeric (mean_posterior2 [-1]) #saving mean values
# FIX: matrix() does not coerce a data.frame to numeric -- the original call
# produced a matrix of mode "list", which breaks rmvnorm(sigma = ...).
# Flatten to a numeric vector first (unlist of a one-row data frame preserves
# the field order, so byrow = TRUE keeps the original row-major layout).
covariance <- matrix (as.numeric (unlist (covariance, use.names = FALSE)),
	nrow = length (mean_posterior2),
	ncol = length (mean_posterior2), byrow = TRUE) #saving covariance
U_matrix2 <- read.csv (phase2_U_file, header = FALSE) #reading results
# Same coercion fix as for the covariance matrix above.
U_matrix2 <- matrix (as.numeric (unlist (U_matrix2, use.names = FALSE)),
	nrow = length (mean_posterior2), 
	ncol = length (mean_posterior2)) #saving U-orthogonal matrix
# Reference sample from the fitted Gaussian posterior approximation.
sampling_posterior <- as.mcmc (rmvnorm (1e3, mean = mean_posterior2,
	sigma = covariance))

setwd (Directory)
save.image ("THVBmodel.Rdata")
plot (sampling_posterior)
# end of conversion


                                                      