# BRT parameter sweep driver
# This script runs one BRT model per invocation, with the parameter set
# supplied as command-line arguments, then produces summaries/plots of the
# fitted model.
# NB: the BRT source functions must be located in the source directory set below

# Obtain command line arguments from .sh file

# Obtain command line arguments from the calling .sh file.
# Each argument is an R assignment of the form "name=value"
# (e.g. "lr=0.05;tc=5;tf=0.75;nt=2000;output.dir='...'").
args <- commandArgs(TRUE)

# Evaluate the arguments to create the variables this script expects
# (lr, tc, tf, nt, output.dir).
# NOTE: eval(parse()) executes arbitrary R code from the command line; this
# is acceptable for a trusted batch script but must never see untrusted input.
# seq_along() (rather than 1:length(args)) makes the loop a safe no-op when
# no arguments are supplied, instead of failing on args[[1]].
for (i in seq_along(args)) {
  eval(parse(text = args[[i]]))
}
# NB: the final value of the loop index i (= the number of arguments) is
# reused later in this script as the parameter-set label in output file names.
	
# Load the Elith et al. BRT helper functions (gbm.step, gbm.plot,
# gbm.predict.grids, ...) from the fixed source directory.
# NOTE: setwd() is used here (and below) because this is a standalone batch
# script; the working directory is intentionally left at out.dir afterwards
# so that all outputs land there.
setwd('/home1/99/jc152199/brt/')
source('brt.functions.R.cjsedit.r')

# Manual definition of optimal parameters for max temp model
#lr=.05;tc=5;tf=.75;nt=2000

# Establish directories. output.dir is supplied on the command line;
# all outputs (PDFs, CSV summaries) are written into it.
in.dir <- '/home1/99/jc152199/brt/data/'

out.dir <- output.dir  # the original paste(output.dir, sep = '') was a no-op

setwd(out.dir)

# Load gbm library (boosted regression tree engine used by gbm.step).
library('gbm')

# Read in data to model.
model.data <- read.csv(
  '/home1/99/jc152199/MicroclimateStatisticalDownscale/ToAnalyse/final_new.csv',
  header = TRUE
)

# Drop rows where the response (micro_max) is missing (n = 5 rows).
# Logical subsetting is used instead of the original -which() form:
# model.data[-which(is.na(...)), ] returns ZERO rows when there are no NAs,
# because negative indexing with an empty index drops every row. The logical
# form is safe in both cases.
model.data <- model.data[!is.na(model.data$micro_max), ]

# Randomly shuffle model.data before subsetting to training/testing sets.
# sample(n) is a permutation of 1..n, equivalent to the original
# sample(c(1:n), n, replace = FALSE).
model.data <- model.data[sample(nrow(model.data)), ]

# Randomly sample a fraction tf of the rows as the training set; the
# remainder (empty when tf == 1) forms the testing set.
n.train <- round(nrow(model.data) * as.numeric(tf), 0)
sample.vector <- sample(seq_len(nrow(model.data)), n.train, replace = FALSE)

train.data <- model.data[sample.vector, ]
test.data <- model.data[-sample.vector, ]

# Fit the boosted regression tree with gbm.step (cross-validated selection of
# the optimal number of trees). Predictors are columns 5 and 7:16 of the
# training data; the response (micro_max) is column 4. lr/tc/nt arrive as
# command-line arguments and may be strings, hence as.numeric().
brt.gbm.step <- gbm.step(
  data = train.data,
  gbm.x = c(5, 7:16),
  gbm.y = 4,
  max.trees = as.numeric(nt),
  family = "gaussian",
  tree.complexity = as.numeric(tc),
  learning.rate = as.numeric(lr),
  bag.fraction = 0.5,
  n.folds = 10
)

# Partial dependence plots for all predictors (variable.no = 0), using the
# cross-validation-selected tree count, laid out on a 3 x 4 grid.
gbm.plot(brt.gbm.step, variable.no = 0, nt = brt.gbm.step$n.trees,
         plot.layout = c(3, 4))

# When a test fraction was held out (tf < 1), evaluate the model on it and
# write plots plus a one-row CSV summary of this parameter set.
if (tf < 1) {

  # Predict onto the held-out test data.
  gbm.predicted <- gbm.predict.grids(brt.gbm.step, test.data,
                                     want.grids = FALSE, sp.name = "preds")

  # Residuals (observed - fitted) on the response in column 4, and the test
  # mean squared error. For a gaussian model this equals the mean deviance
  # per observation, which is what the original sum(...)/length(...) computed.
  residuals <- test.data[, 4] - gbm.predicted
  total.deviance <- mean(residuals^2)

  # Observed vs predicted, with a 1:1 reference line drawn over 0-40
  # (the expected temperature range).
  pdf('obs_versus_preds.pdf')
  plot(test.data[, 4], gbm.predicted,
       xlab = 'Obs. Testing Data', ylab = 'Fitted Values',
       main = paste0('Obs vs Preds - Deviance - ', round(total.deviance, 4)))
  lines(rbind(c(0, 0), c(40, 40)), type = 'l', col = 'red')
  dev.off()

  # Observed vs residuals.
  pdf('obs_versus_resid.pdf')
  plot(test.data[, 4], residuals,
       xlab = 'Obs. Testing Data', ylab = 'Residual (Obs-Fit)',
       main = paste0('Obs vs Resid - Deviance - ', round(total.deviance, 4)))
  dev.off()

  # Write out model parameters and deviances as a one-row .csv file.
  # i is the index left over from the argument-evaluation loop and labels
  # this parameter set in the output file name.
  params <- data.frame(
    learning.rate   = lr,
    tree.complexity = tc,
    train.fraction  = tf,
    n.trees         = nt,
    param.set       = sprintf('%03d', i),
    op.tree.num     = brt.gbm.step$n.trees,
    test.deviance   = total.deviance,
    train.deviance  = brt.gbm.step$cv.statistics$deviance.mean
  )

  write.csv(params,
            file = paste0('maxtemp.model.summary_', sprintf('%03d', i), '.csv'),
            row.names = FALSE)

# Done

}

# No held-out test data (tf == 1): there is nothing to predict onto, so
# record the training summary with the test deviance set to NA.
if (tf == 1) {

  total.deviance <- NA

  # Same summary row as the tf < 1 case; i labels the parameter set.
  params <- data.frame(
    learning.rate = lr,
    tree.complexity = tc,
    train.fraction = tf,
    n.trees = nt,
    param.set = sprintf('%03i', i),
    op.tree.num = brt.gbm.step$n.trees,
    test.deviance = total.deviance,
    train.deviance = brt.gbm.step$cv.statistics$deviance.mean
  )

  write.csv(params,
            file = paste('maxtemp.model.summary_', sprintf('%03i', i), '.csv',
                         sep = ''),
            row.names = FALSE)

}

# Done




