# Establish input/output directories.
# NB: the BRT source functions must be located in the working directory.
in.dir <- '/home1/99/jc152199/brt/data/'
out.dir <- '/home1/99/jc152199/brt/data/'

# Run from the data directory so relative output paths resolve there.
setwd(in.dir)

# Load the gbm library to perform Boosted Regression Tree analysis.
necessary <- c('gbm')

# Check which required packages are already installed.
# NB: compare against the installed package NAMES (rownames of the matrix);
# the original compared against every cell of installed.packages() — versions,
# paths, etc. — which can false-positive.
installed <- necessary %in% rownames(installed.packages())

# Install anything missing, pulling in dependencies.
if (any(!installed)) install.packages(necessary[!installed], dependencies = TRUE)

# Attach the libraries (character.only because the names are strings).
for (lib in necessary) library(lib, character.only = TRUE)

# Read in the data to model.
model.data <- read.csv('/home1/99/jc152199/MicroclimateStatisticalDownscale/ToAnalyse/MicroMacroMinMaxASCII.csv', header = TRUE)

# Randomly shuffle the rows before passing to gbm().
# sample(n) draws a full permutation of 1..n (without replacement).
model.data <- model.data[sample(nrow(model.data)), ]

# Randomly sample half of the data to produce complementary training and
# testing sets.
# BUG FIX: the original drew TWO independent samples, so test.data was not the
# complement of train.data — the two sets overlapped (data leakage). Sample
# the row indices once and use the negative of the same indices for the test set.
train.idx <- sample(seq_len(nrow(model.data)), round(nrow(model.data) * 0.5, 0), replace = FALSE)
train.data <- model.data[train.idx, ]
test.data <- model.data[-train.idx, ]

# Candidate BRT hyper-parameters: learning rate (shrinkage), tree complexity
# (interaction depth), and training fraction.
lr.list <- c(.1, .05, .025, .01, .005, .001, .0005)
tc.list <- c(1, 3, 5)
tf.list <- c(.1, .25, .5, .75)

# Establish two empty (zero-row) data frames to accumulate model-performance
# results into.
# BUG FIX: the originals were seeded with a single all-NA row, which ended up
# as a spurious NA row at the top of the written CSV.
cv.error.data <- data.frame(learning.rate = numeric(0),
                            tree.complexity = numeric(0),
                            iteration = integer(0),
                            optimal.tree.num = numeric(0),
                            cv.error = numeric(0))
holdout.deviance.data <- data.frame(learning.rate = numeric(0),
                                    tree.complexity = numeric(0),
                                    iteration = integer(0),
                                    optimal.tree.num = numeric(0),
                                    cv.error = numeric(0))

	# Total boosting iterations per model (matches the "5000trees" output filename).
	# BUG FIX: the original passed an undefined object `nt` to gbm().
	n.trees.max <- 5000

	for (tc in tc.list)

	{

		for(lr in lr.list)

		{

		# Fit the BRT model for the CURRENT grid point.
		# BUG FIX: the original hard-coded interaction.depth = tc.list[3] and
		# shrinkage = lr.list[1], so every iteration of the grid search fitted
		# the identical model; the loop variables tc and lr are now used.
		brt.gbm = gbm(formula = micro_max~AWAP_max+coastdist+fpcmean+fpcvar+roaddist+solar,
		              distribution = "gaussian",
		              data = model.data,
		              n.trees = n.trees.max,
		              interaction.depth = tc,
		              shrinkage = lr,
		              train.fraction = tf.list[1],
		              bag.fraction = 0.5,
		              cv.folds = 10)

		# Determine the optimal tree number by cross-validation.
		op.tree.num = gbm.perf(brt.gbm, plot.it = FALSE, oobag.curve = FALSE,
		                       overlay = TRUE, method = 'cv')

		# Compile the per-iteration CV error against tc, lr, iteration, and
		# the optimal tree number for this grid point.
		t.data = data.frame(learning.rate = rep(lr, brt.gbm$n.trees),
		                    tree.complexity = rep(tc, brt.gbm$n.trees),
		                    iteration = seq_len(brt.gbm$n.trees),
		                    optimal.tree.num = rep(op.tree.num, brt.gbm$n.trees),
		                    cv.error = brt.gbm$cv.error)

		# Bind the results from the innermost loop into one data frame for
		# writing out later. (NB: growing via rbind is O(n^2) but the grid is
		# small enough here that clarity wins.)
		cv.error.data = rbind(cv.error.data, t.data)

		# Prediction onto the testing data is only needed if you want fitted
		# values from an independent dataset; supplying a vector to n.trees in
		# predict.gbm() returns predictions at each iteration of the model:
		#preds = predict.gbm(brt.gbm, test.data, n.trees = seq_len(n.trees.max), type = "response")

		}

	}
	
# Write the compiled cross-validation error summary out to disk.
out.file <- 'cv.error.summary.5000trees.csv'
write.csv(cv.error.data, file = out.file, row.names = FALSE)

#End

# Plot cv.error and valid.error on the same graph for the last fitted model.
# BUG FIX: the original plotted a hard-coded x vector of length 10000 against
# brt.gbm$valid.error (length n.trees) — a length mismatch unless n.trees
# happened to equal 10000. Derive all lengths from the fitted model instead.

n.iter <- length(brt.gbm$valid.error)

ylims <- c(0, max(brt.gbm$cv.error, brt.gbm$valid.error))
xlims <- c(0, n.iter)

png('cverror.validerror.compare.png')

# Empty frame (type = 'n'); the series are drawn with lines() below.
plot(seq_len(n.iter), brt.gbm$valid.error, xlab = 'Iteration', ylab = 'Deviance',
     main = 'Error Comparison', col = 1, type = 'n', xlim = xlims, ylim = ylims)

legend("topright", legend = c('CV Error', 'Valid Error'), text.col = c(1, 2), bty = "n")

lines(brt.gbm$cv.error, col = 1)
lines(brt.gbm$valid.error, col = 2)

dev.off()

