### This script intersects ASCII grid data with occurrence and background points from MVD
### The end product is 'SWD' files for MAXENT: one column each for spp_id, lat, and long, plus one column per ASCII grid

# Working directory and input/output locations

in.dir = '/home1/99/jc152199/MAXENT/'
setwd(in.dir)
bkgd.dir = '/home1/99/jc152199/MAXENT/bkgd/'
occur.dir = '/home1/99/jc152199/MAXENT/occurs/'

# Required packages

library(SDMTools) # read.asc / read.asc.gz / extract.data / write.asc
library(sp)       # SpatialPoints / spTransform / CRS
library(rgdal)    # PROJ/GDAL bindings used by spTransform

# Read in background positions
# These are in easting/northing (UTM), convert to lat/long

base.pos = read.csv(paste0(in.dir,'base_pnts.csv'))

# Columns 2-3 hold the projected coordinates; name them explicitly

names(base.pos)[2]='east'
names(base.pos)[3]='north'

# Convert east/north from base.pos to lat/long
### EPSG 32755 = WGS84 / UTM zone 55S (source projection)
### EPSG 4326  = WGS84 geographic lat/long (target projection)

tout = as.data.frame(spTransform(SpatialPoints(cbind(base.pos[2:3]),proj4string=CRS("+init=epsg:32755")), CRS("+init=epsg:4326")))
base.pos$lat = tout[,2]  # spTransform output is (x = long, y = lat)
base.pos$long = tout[,1]

# Write out the lat/long columns (base.pos[4:5]) for later use

write.csv(base.pos[4:5],file=paste0(bkgd.dir,'base_pnts_latlong.csv'),row.names=F)

# Read this file back in as base.pnts (columns: 1 = lat, 2 = long)

base.pnts = read.csv(paste0(bkgd.dir,'base_pnts_latlong.csv'),header=T)

# Now need to intersect base.pnts with all ASCII data (8 BC's, 4 mC's, and BVG)
# Start by creating a list of files to intersect with
# NOTE: the pattern escapes the dots and anchors at the end -- the
# previous '.asc.gz' was a regex where '.' matched any character

bc.files = list.files('/home1/99/jc152199/MicroclimateStatisticalDownscale/250mASCII/BC6_monthly/',pattern='\\.asc\\.gz$',recursive=T,full.names=T)
mc.files = list.files('/home1/99/jc152199/MicroclimateStatisticalDownscale/250mASCII/microCLIM/',pattern='\\.asc\\.gz$',recursive=T,full.names=T)
bvg.file = '/home1/99/jc152199/MicroclimateStatisticalDownscale/250mASCII/STATIC/BVG_WTplusbuffer_LatLong_WGS1984_250mres.asc'
all.files = c(bc.files,mc.files,bvg.file)

# Define column names from the SAME listings as all.files so the names
# line up one-to-one with the files actually read.
# (Previously mc.names was listed from NEWmicroCLIM_76-05/ while
# mc.files came from microCLIM/, which could silently mislabel columns.)

bc.names = list.files('/home1/99/jc152199/MicroclimateStatisticalDownscale/250mASCII/BC6_monthly/',pattern='\\.asc\\.gz$',recursive=T)
mc.names = list.files('/home1/99/jc152199/MicroclimateStatisticalDownscale/250mASCII/microCLIM/',pattern='\\.asc\\.gz$',recursive=T)
all.names = c(gsub('.asc.gz','',c(bc.names,mc.names),fixed=T),'BVG')
all.names = gsub('_','',all.names,fixed=T)

# Intersect base.pnts with every grid.
# The gzipped grids (.asc.gz) and the plain-ASCII BVG grid need different
# readers; detect by file extension instead of the hard-coded position 13.

for (j in seq_along(all.files))

	{

	tfile = all.files[j]

	# Choose the reader from the extension
	if (grepl('\\.asc\\.gz$', tfile)) {t.asc = read.asc.gz(tfile)} else {t.asc = read.asc(tfile)}

	t.data = extract.data(base.pnts[2:1], t.asc) # extract.data expects (long, lat) order

	base.pnts = cbind(base.pnts, t.data) # Append the extracted values as a new column

	names(base.pnts)[j + 2] = all.names[j] # lat/long occupy columns 1-2

	cat(paste('\n',all.names[j],' - Processing Complete','\n',sep=''))

	}

# Close loop

# Prepend a species_id column; every background point is tagged 'bkgd'

base.pnts = cbind(spp = rep('bkgd', nrow(base.pnts)), base.pnts)

# Drop any row with a missing value in any grid

base.pnts = na.omit(base.pnts)

# Write one SWD file per climate-data set
# NOTE(review): the selections are positional (cols 1:3 = spp/lat/long,
# 4:16 = the thirteen grids) -- confirm c(1:11,16) and c(1:3,8:16) pick
# the intended BIOCLIM vs microCLIM variable sets

write.csv(base.pnts[, c(1:11, 16)], file = paste0(bkgd.dir, 'BIOCLIM_Background_SWD.csv'), row.names = F)
write.csv(base.pnts[, c(1:3, 8:16)], file = paste0(bkgd.dir, 'microCLIM_Background_SWD.csv'), row.names = F)

# Now perform the same operation using the occurrence records
# (build the path from occur.dir for consistency with the rest of the script)

occur.pnts = read.csv(paste0(occur.dir,'alloccurs.csv'),header=T)

# Convert east/north (EPSG 32755, WGS84 / UTM zone 55S) to
# lat/long (EPSG 4326, WGS84 geographic)

tout = as.data.frame(spTransform(SpatialPoints(cbind(occur.pnts[1:2]),proj4string=CRS("+init=epsg:32755")), CRS("+init=epsg:4326")))
occur.pnts$lat = tout[,2]  # spTransform output is (x = long, y = lat)
occur.pnts$long = tout[,1]

# Keep only the species id and coordinates, naming the columns directly

occur.pnts = data.frame(spp = occur.pnts$species_ID,
                        lat = occur.pnts$lat,
                        long = occur.pnts$long)

# Intersect occur.pnts with every grid (same logic as the background loop).
# Reader is chosen by extension instead of the hard-coded position 13.

for (j in seq_along(all.files))

	{

	tfile = all.files[j]

	# Choose the reader from the extension
	if (grepl('\\.asc\\.gz$', tfile)) {t.asc = read.asc.gz(tfile)} else {t.asc = read.asc(tfile)}

	t.data = extract.data(occur.pnts[3:2], t.asc) # extract.data expects (long, lat) order

	occur.pnts = cbind(occur.pnts, t.data) # Append the extracted values as a new column

	names(occur.pnts)[j + 3] = all.names[j] # spp/lat/long occupy columns 1-3

	cat(paste('\n',all.names[j],' - Processing Completed','\n',sep=''))

	}

# Close loop

# Drop occurrence rows with a missing value in any grid

occur.pnts = na.omit(occur.pnts)

# Write one SWD file per climate-data set
# NOTE(review): the selections are positional (cols 1:3 = spp/lat/long,
# 4:16 = the thirteen grids) -- confirm c(1:11,16) and c(1:3,8:16) pick
# the intended BIOCLIM vs microCLIM variable sets

write.csv(occur.pnts[, c(1:11, 16)], file = paste0(occur.dir, 'BIOCLIM_Occurrence_SWD.csv'), row.names = F) # Occurrences for BIOCLIM runs
write.csv(occur.pnts[, c(1:3, 8:16)], file = paste0(occur.dir, 'microCLIM_Occurrence_SWD.csv'), row.names = F) # Occurrences for microCLIM runs


#### Decompress the expCLIM grids to plain .asc files for MAXENT

in.dir = '/home1/99/jc152199/MicroclimateStatisticalDownscale/250mASCII/expCLIM/'
out.dir = '/home1/99/jc152199/MAXENT/ASCII/expCLIM/'

# Only pick up gzipped ASCII grids -- any other file in the directory
# would make read.asc.gz fail

f = list.files(in.dir, pattern = '\\.asc\\.gz$', full.names = T)

# Output names: strip the extension as a fixed string (not a regex,
# where '.' would match any character) and drop underscores

fnames = gsub('_', '', gsub('.asc.gz', '', basename(f), fixed = T), fixed = T)

for (i in seq_along(f))

{

b = read.asc.gz(f[i])

write.asc(b, file = paste0(out.dir, fnames[i], '.asc'))

}



	
	
