###### INITIALIZE
setwd("C:/Users/spadmin/PhD/Projects/EPREmergence/Stats/SAUS/SAUS-NNM v1.0")
source("PGHandlerFunctions_20131001.R")
source("C:/Users/spadmin/PhD/Projects/GeoDBMover/R/GeoDBMover_v1.0.R")
library(mgcv)
library(Hmisc)
library(reshape2)
library(spdep)
library(MASS)


###### FUNCTIONS
plotMeanDiff <- function(match.df, original.df, matchvar, covar) {
	# Dot chart of treated-minus-control mean differences for each covariate,
	# before ("original", filled dots) and after ("matched", open dots)
	# matching. Covariates are rescaled to 0-100 using the range observed in
	# the unmatched data so differences are comparable across variables.
	#
	# match.df:    matched sample data frame
	# original.df: full (unmatched) data frame
	# matchvar:    name of the binary treatment column (1 = treated)
	# covar:       character vector of covariate names to check
	mdiff.mat <- matrix(NA, length(covar), 2)
	for (i in seq_along(covar)) {
		or.range <- max(original.df[,covar[i]]) - min(original.df[,covar[i]])
		or.min <- min(original.df[,covar[i]])
		# FIX: subtract the minimum (was "+ or.min"), consistent with the
		# standardization in plotQQ, so values actually fall in [0, 100]
		or.var <- ((original.df[,covar[i]] - or.min)/or.range)*100
		ma.var <- ((match.df[,covar[i]] - or.min)/or.range)*100
		or.tr <- or.var[original.df[,matchvar] == 1]
		or.cr <- or.var[original.df[,matchvar] == 0]
		ma.tr <- ma.var[match.df[,matchvar] == 1]
		ma.cr <- ma.var[match.df[,matchvar] == 0]
		mdiff.mat[i,1] <- mean(or.tr) - mean(or.cr)
		mdiff.mat[i,2] <- mean(ma.tr) - mean(ma.cr)
	}
	mdiff.df <- data.frame(covar, mdiff.mat)
	names(mdiff.df) <- c("covar", "original", "matched")
	mdiff.df <- mdiff.df[order(-mdiff.df[,2]), ]
	
	# FIX: max(abs(...)) (was abs(max(...))) so the symmetric x-axis always
	# covers the largest absolute difference, also when it is negative
	lim <- max(abs(mdiff.df$original))
	
	dotchart2(mdiff.df$original, mdiff.df$covar, pch=16, xlab='Mean of 0-100 standardized variable', ylab='', xlim=c(-lim,lim), main="Mean Balance")
	dotchart2(mdiff.df$matched, mdiff.df$covar, pch=1, xlab='', ylab='', xlim=c(-lim,lim), add=TRUE)
	abline(v=0, lty=2, col="grey")	
	legend(x="bottomleft", legend=c("Unmatched", "Matched"), pch=c(16,1))
}


plotQQ <- function(match.df, original.df, matchvar, covar) {
	# Treated-vs-control QQ plots for the propensity score ("ps") and each
	# covariate, overlaying unmatched (black) and matched (red) samples,
	# annotated with two-sample KS tests. Variables are rescaled to 0-100
	# using the range of the unmatched data. One plot per variable; the
	# device prompts between plots.
	#
	# FIX: restore the previous "ask" graphics setting on exit instead of
	# leaving par(ask=TRUE) switched on globally after the function returns.
	oask <- par(ask=TRUE)
	on.exit(par(oask), add=TRUE)
	covar <- c("ps", covar)
	for (i in seq_along(covar)) {
		or.range <- max(original.df[,covar[i]]) - min(original.df[,covar[i]])
		or.min <- min(original.df[,covar[i]])
		or.var <- ((original.df[,covar[i]] - or.min)/or.range)*100
		ma.var <- ((match.df[,covar[i]] - or.min)/or.range)*100
		or.tr <- or.var[original.df[,matchvar] == 1]
		or.cr <- or.var[original.df[,matchvar] == 0]
		ma.tr <- ma.var[match.df[,matchvar] == 1]
		ma.cr <- ma.var[match.df[,matchvar] == 0]
		
		# KS tests: distributional balance before and after matching
		or.ks <- ks.test(or.tr, or.cr)
		ma.ks <- ks.test(ma.tr, ma.cr)
		
		or.ks.tx <- paste("KS-Test: D = ", round(or.ks$statistic,2), ", p = ", round(or.ks$p.value,2), sep="")
		ma.ks.tx <- paste("KS-Test: D = ", round(ma.ks$statistic,2), ", p = ", round(ma.ks$p.value,2), sep="")
		
		
		varname <- ifelse(covar[i] == "ps", "Propensity Score", covar[i])
		
		qqplot(or.tr, or.cr, main=varname, xlim=c(0, 100), ylim=c(0, 100))
		par(new=TRUE)
		qqplot(ma.tr, ma.cr, xlim=c(0, 100), ylim=c(0, 100), xlab="", ylab="", xaxt="n", yaxt="n", col="red")
		abline(a=0, b=1, lty=2)
		text(x=0, y=100, labels=ma.ks.tx, col="red", adj=0)
		text(x=100, y=0, labels=or.ks.tx, adj=c(1,0))
	}
}


clBoot <- function(glm.fit, fit.df, sp.var, reps=100, subst.coefs=length(coef(glm.fit)), verbose=FALSE){
	# Cluster bootstrap for a fitted (g)lm: resamples entire clusters
	# (identified by the sp.var column of fit.df) with replacement, refits the
	# model on each resample, and reports bootstrap SEs and two-sided
	# sign-based p-values for the first subst.coefs coefficients.
	#
	# glm.fit:     fitted model object accepted by update() and coef()
	# fit.df:      data frame the model was fitted on
	# sp.var:      name of the cluster id column
	# reps:        number of bootstrap replications
	# subst.coefs: number of leading coefficients to report
	# verbose:     print the replication counter while running
	#
	# Returns the matrix of bootstrap coefficient draws (one row per
	# converged replication); prints a summary table as a side effect.
	coef.mat <- matrix(NA, reps, subst.coefs)
	sp.units <- unique(fit.df[,sp.var])
	for (r in seq_len(reps)) {
		boot.units <- sample(sp.units, length(sp.units), replace = TRUE)
		boot.df <- data.frame(boot.units)
		names(boot.df) <- sp.var
		# FIX: removed stray trailing comma in the merge() call. merge()
		# keeps duplicated keys, so a cluster drawn k times contributes its
		# rows k times.
		boot.df <- merge(boot.df, fit.df, by=sp.var, all.y=FALSE)
		boot.fit <- update(glm.fit, data=boot.df)
		
		if (verbose) {
			print(r)
			flush.console()
		}
		
		# Keep only converged fits; models without a $converged flag (e.g.
		# plain lm) are always kept
		if (!is.null(boot.fit$converged)) {
			if (boot.fit$converged) {
				coef.mat[r, ] <- coef(boot.fit)[1:subst.coefs]
			} else {
				coef.mat[r, ] <- NA
				warning(paste("Bootstrap iteration", r, "did not converge. This sample will not be used for estimation."))
			}
		} else {
			coef.mat[r, ] <- coef(boot.fit)[1:subst.coefs]
		}
	}
	coef.mat <- na.omit(coef.mat)
	
	# Two-sided bootstrap p-value: twice the share of draws on the minority
	# side of zero
	boot.p <- rep(NA, ncol(coef.mat))
	for (s in seq_len(ncol(coef.mat))) {
		coef.dist <- coef.mat[,s]
		boot.p[s] <- 2*min(sum(coef.dist <= 0), sum(coef.dist >= 0))/length(coef.dist)
	}
	# Bootstrap SE: column-wise sd of the draws
	boot.se <- apply(coef.mat, 2, sd)
	out.mat <- cbind(coef(glm.fit)[1:subst.coefs], sqrt(diag(vcov(glm.fit)))[1:subst.coefs], boot.se, boot.p)
	colnames(out.mat) <- c("coef", "se", "boot.se", "boot.p")
	
	print(paste(reps, "bootstrap iterations clustered on", length(sp.units), "groups;", reps-nrow(coef.mat), "discarded due to convergence failure."))
	print(out.mat)
	
	colnames(coef.mat) <- names(coef(glm.fit))[1:subst.coefs]
	return(coef.mat)
}

spFDiffSim <- function(sp.fit, sp.mat, target.ids, targetvar, targetval, reps=100, ival=0.95) {
	# Simulated equilibrium ("EQ") and short-term ("ST") first differences of
	# the expected outcome when targetvar is set to targetval for each unit
	# in target.ids, given a fitted spatial-lag model sp.fit (first
	# coefficient = rho, remainder = beta, design matrix in sp.fit$X).
	# Parameter uncertainty is propagated by drawing reps coefficient vectors
	# from MVN(coef(sp.fit), vcov(sp.fit)). The outcome is exponentiated,
	# i.e. assumes a log-scale linear predictor.
	#
	# Returns a 2x3 matrix (rows: Equilibrium, Short Term) with lower CI
	# bound, point estimate, upper CI bound, averaged over target.ids.
	
	X <- sp.fit$X
	n <- nrow(X)
	rho <- coef(sp.fit)[1]
	beta <- coef(sp.fit)[-1]
	gamma <- coef(sp.fit)
	vc <- vcov(sp.fit)
	
	# Helper: mean EQ/ST first difference over target.ids for a given
	# (rho, beta). The spatial multiplier inverse and the baseline
	# predictions do not depend on the target unit, so they are computed
	# once per call (the original recomputed them on every j iteration,
	# including one n-by-n solve() per target per simulation draw).
	fdiffs <- function(rho.c, beta.c) {
		M <- solve(diag(n) - rho.c*sp.mat)
		eq.base <- exp(M%*%X%*%beta.c)
		st.base <- exp(X%*%beta.c)
		eq.vec <- vector("numeric", length(target.ids))
		st.vec <- vector("numeric", length(target.ids))
		for (j in seq_along(target.ids)) {
			target.id <- target.ids[j]
			X.alt <- X
			X.alt[target.id, targetvar] <- targetval
			eq.vec[j] <- (eq.base - exp(M%*%X.alt%*%beta.c))[target.id]
			st.vec[j] <- (st.base - exp(X.alt%*%beta.c))[target.id]
		}
		c(eq=mean(eq.vec), st=mean(st.vec))
	}
	
	# Point estimates at the fitted coefficients
	point <- fdiffs(rho, beta)
	eq.y.hat.diff.target <- unname(point["eq"])
	st.y.hat.diff.target <- unname(point["st"])
	
	# Simulation draws for confidence intervals
	eq.fdiff.sim <- rep(NA, reps)
	st.fdiff.sim <- rep(NA, reps)
	for (i in seq_len(reps)) {
		gamma.sim <- mvrnorm(n = 1, gamma, vc)
		sim <- fdiffs(gamma.sim[1], gamma.sim[-1])
		eq.fdiff.sim[i] <- sim["eq"]
		st.fdiff.sim[i] <- sim["st"]
	}
	
	eq.cilim <- quantile(eq.fdiff.sim, c((1-ival)/2, 1-((1-ival)/2)))
	st.cilim <- quantile(st.fdiff.sim, c((1-ival)/2, 1-((1-ival)/2)))
	
	ret.mat <- rbind(c(eq.cilim[1], eq.y.hat.diff.target, eq.cilim[2]), c(st.cilim[1], st.y.hat.diff.target, st.cilim[2]))
	rownames(ret.mat) <- c("Equilibrium", "Short Term")
	
	return(ret.mat)
}

deleteOverlaps <- function(input.df) {
	# Thins a set of buffered points so that none overlap. Points without any
	# overlap (rpid2 is NA) are always kept. Overlapping pairs (rpid, rpid2)
	# are visited in random order; each kept point eliminates every point
	# recorded as overlapping it. Returns the vector of surviving rpids.
	# Stochastic: the result depends on the RNG state.
	
	# Get points without overlaps
	single.points <- unique(input.df$rpid[is.na(input.df$rpid2)])
	
	# Pairs of overlapping points (i.e., buffers)
	input.op.df <- input.df[!is.na(input.df$rpid2), names(input.df) %in% c("rpid", "rpid2")]
	# Shuffle data frame. FIX: seq_len() so an empty pair set yields an empty
	# index vector; sample(1:0) would produce c(1, 0) and a spurious NA row.
	input.op.df <- input.op.df[sample(seq_len(nrow(input.op.df))),]
	# Greedily keep points, dropping everything that overlaps a kept point.
	# FIX: seq_along() instead of 1:length(); the original crashed with
	# "argument is of length zero" when there were no overlapping points.
	op.cand.points <- unique(input.op.df$rpid)
	op.keep.points <- op.cand.points
	for (i in seq_along(op.cand.points)){
		thispoint <- op.cand.points[i]
		if (thispoint %in% op.keep.points){
			oppoints <- input.op.df$rpid2[input.op.df$rpid == thispoint]
			op.keep.points <- op.keep.points[!(op.keep.points %in% oppoints)]
		}
	}
	
	return(c(op.keep.points, single.points))
}


###### CONNECT TO DB
# NOTE(review): DB credentials are hard-coded in plain text — consider
# reading them from an environment variable or an untracked config file.
con <- getPGConn("growup", 5432, "cederman.ethz.ch", "admin", "hNo7Yoo")


###### CREATE BUFFER SAMPLE ON DB
# Draw random buffered points per country and attach covariates, server-side
retval <- dbRunScript(con, "createCountryRandomPoints_v0.1.sql", return=FALSE, script.param=list(BUFFERSIZE=25000, SAMPLESIZE=100000, EXCLUSIONSIZE=200000))
retval <- dbRunScript(con, "assignCovariates_v1.0.sql", return=FALSE)  # Takes around 80 seconds
print("done")

# Get data from DB
rb.df <- dbGetTable(con, "hunzikp", "randombuffersdata")

# Data manipulation: log transforms (+1 where zeros can occur) and NA -> 0
# recodes for the group/exclusion variables (NA presumably means "no group
# in buffer" — verify against the assignCovariates SQL script)
rb.df$lnpop <- log(rb.df$pop1990 + 1)
rb.df$lnarea <- log(rb.df$area_sqkm)
rb.df$lncap <- log(rb.df$capdist_km + 1)
rb.df$lnborder <- log(rb.df$border_km + 1)
rb.df$cowid <- as.factor(rb.df$countries_cowid)  # country id as factor, used for fixed effects below
rb.df$groupcount09[is.na(rb.df$groupcount09)] <- 0
rb.df$groupcount65[is.na(rb.df$groupcount65)] <- 0
rb.df$gpdiff <- rb.df$groupcount09 - rb.df$groupcount65  # change in group count, 1965 -> 2009
rb.df$mgs09 <- ifelse(is.na(rb.df$min_groupsize09), 1, rb.df$min_groupsize09)  # missing min group size coded as 1
rb.df$max_excl09[is.na(rb.df$max_excl09)] <- 0
rb.df$max_excl[is.na(rb.df$max_excl)] <- 0
rb.df$max_powerless[is.na(rb.df$max_powerless)] <- 0
rb.df$max_discrim[is.na(rb.df$max_discrim)] <- 0
rb.df$mno <- ifelse(rb.df$max_powerless + rb.df$max_discrim > 0, 1, 0)  # 1 if any powerless or discriminated group
rb.df$lec <- log(rb.df$ethnologue_count+1)


# Subsetting: sample restricted to regions of interest and to rows with
# complete key covariates
rb.df <- subset(rb.df, (ssafrica == 1 | asia == 1))  # Regional subsetting
rb.df <- subset(rb.df, cntr_petropoint == 1)  # Only countries with petroleum fields
rb.df <- rb.df[!is.na(rb.df$lnpop),]
rb.df <- rb.df[!is.na(rb.df$lnborder),]

###### NEAREST NEIGHBOR PROPENSITY SCORE MATCHING AND THINNING

# Fetch the table of overlapping buffer pairs from the DB
op.df <- dbRunScript(con, "getBufferOverlaps_v0.1.sql", return=TRUE)

# Estimate propensity scores with country fixed effects, twice: a parametric
# logit and a GAM logit with smooth terms
glm.fit <- glm(petropoint ~  cowid + lnarea + lnpop*lncap + lnborder + elevsd + lec + groupcount09, data=rb.df, family=binomial)
gam.fit <- bam(petropoint ~ cowid + s(lnarea) + s(lnborder) + s(elevsd) + lec + s(lnpop,lncap) + groupcount09, data=rb.df, family=binomial)

glm.aic <- AIC(glm.fit)
gam.aic <- AIC(gam.fit)
ps.glm.logit <- predict(glm.fit, type="response")
ps.gam.logit <- predict(gam.fit, type="response")

# Keep the scores from whichever specification fits better by AIC
ps.logit <- if (gam.aic < glm.aic) ps.gam.logit else ps.glm.logit

# Working data frames for the matching loop: candidate controls (cd.df),
# treated units (td.df), and the accumulator for matched pairs (match.mat)
rb.df$ps <- ps.logit
nnd.df <- subset(rb.df, select=c("rpid", "petropoint", "ps"))

cd.df <- nnd.df[nnd.df$petropoint == 0,]
td.df <- nnd.df[nnd.df$petropoint == 1,]
match.mat <- NULL

# Matching parameters
nnn <- 1              # number of control matches per treated unit
replacement <- FALSE  # each control may be matched at most once
caliper <- 0.005      # maximum allowed propensity-score distance

# Greedy 1-to-nnn nearest-neighbor matching with caliper and spatial
# thinning. Each pass: every remaining treated unit collects its nnn nearest
# control candidates within the caliper; the treated unit whose controls are
# closest on the propensity score is selected; all buffers overlapping the
# selected treatment/controls (per op.df) are removed before the next pass.
# Matched pairs accumulate in match.mat (treated rpid, control rpid, ps
# distance).
repeat {
	# For each treated unit, find nnn nearest control units
	treated.ids <- unique(td.df$rpid)
	cand.list <- vector("list", 0)
	for (i in seq_along(treated.ids)){
		this.cd.df <- cd.df
		cd.del.vec <- vector("numeric", 0)
		thisid <- treated.ids[i]
		thisps <- td.df$ps[td.df$rpid == thisid]
		nnid.mat <- NULL
		
		# Restrict candidate controls to those within caliper
		this.cd.df <- this.cd.df[abs(this.cd.df$ps - thisps) <= caliper,,drop=FALSE]
		
		if (nrow(this.cd.df) > 0) {  # If there are valid candidate controls
			repeat {  # Get nearest nnn neighbors
				nnid <- this.cd.df$rpid[abs(this.cd.df$ps - thisps) == min(abs(this.cd.df$ps - thisps))]
				ps.dist <- min(abs(this.cd.df$ps - thisps))
				if (is.null(nnid.mat)) {
					nnid.mat <- cbind(nnid, ps.dist)
				} else {
					nnid.mat <- rbind(nnid.mat, cbind(nnid, ps.dist))
				}
				
				# Remove the selected controls and everything overlapping them
				# from this treated unit's candidate pool
				cd.op <- op.df$rpid2[op.df$rpid %in% nnid]
				this.cd.df <- this.cd.df[!(this.cd.df$rpid %in% cd.op),]
				this.cd.df <- this.cd.df[!(this.cd.df$rpid %in% nnid),]
				cd.del.vec <- c(cd.del.vec, cd.op)
				
				# Break if nnn neighbors selected or no candidates remain.
				# FIX: ">=" (was "==") — ties at the same ps distance add
				# several rows at once, which could overshoot nnn and keep
				# looping until the candidate pool was exhausted; "||" (was
				# vectorized "|") for the scalar condition.
				if (nrow(nnid.mat) >= nnn || nrow(this.cd.df) == 0) {
					break
				}
			}
			cand.list[[length(cand.list) + 1]] <- list(thisid, nnid.mat, cd.del.vec)
		} else {  # Treated unit has no candidate controls left
			td.df <- td.df[!(td.df$rpid == thisid),]  # Delete treated unit from treatment list
		}
	}
	
	# Break loop if no more candidate matches found (no more treatment-control pairs within caliper)
	if (length(cand.list) == 0) {
		break
	}
	
	# Select the treatment unit whose nnn nearest controls are closest:
	# average ps distance per treatment unit (mean over column 2 of the nn
	# matrix); which.min returns the first index of the minimum, matching
	# the original which(nc == min(nc))[1]
	nc <- unlist(lapply(cand.list, function (x) {mean((x[[2]])[,2])}))
	sel.list <- cand.list[[which.min(nc)]]
	tr.id <- sel.list[[1]]
	nnid.mat <- sel.list[[2]]
	cd.del.vec <- sel.list[[3]]
	
	# Delete overlapping units from working lists
	tr.op <- op.df$rpid2[op.df$rpid %in% tr.id]
	td.df <- td.df[!(td.df$rpid %in% tr.op),]  # Delete units overlapping with selected treatment
	cd.df <- cd.df[!(cd.df$rpid %in% cd.del.vec),]  # Delete units overlapping with selected controls
	if(!replacement) {
		cd.df <- cd.df[!(cd.df$rpid %in% nnid.mat[,1]),] # Delete selected controls
	}
	
	# Delete selected treatment from treatment list
	td.df <- td.df[!(td.df$rpid == tr.id),]

	# Remember nn pairs
	if (is.null(match.mat)) {
		match.mat <- cbind(tr.id, nnid.mat)
	} else {
		match.mat <- rbind(match.mat, cbind(tr.id, nnid.mat))
	}
	
	# Break loop if no more treated or control units (FIX: scalar "||")
	if (nrow(td.df) == 0 || nrow(cd.df) == 0) {
		break
	}
	
	print(paste(length(unique(match.mat[,2])), " matches.", sep=""))
	flush.console()
}

# Assemble the matched sample: all selected treatment and control rpids,
# joined back to the full covariate data
matched.ids <- c(unique(match.mat[,1]), unique(match.mat[,2]))
match.df <- merge(data.frame(rpid=matched.ids), rb.df, by="rpid", all.x=TRUE, all.y=FALSE)
is.treated <- match.df$petropoint == 1
treated.df <- match.df[is.treated,]
control.df <- match.df[!is.treated,]
paste(nrow(treated.df), "treated units.")
paste(nrow(control.df), "control units.")

# Some Balance Checking
bal.covs <- c("lnarea", "lnpop", "lncap", "lnborder", "elevsd", "ethnologue_count")
plotMeanDiff(match.df, rb.df, "petropoint", bal.covs)
plotQQ(match.df, rb.df, "petropoint", bal.covs)




###### UPLOAD MATCHED DATA AND GET WEIGHTS
# Push the matched rpids back to the DB so the SQL script can compute
# pairwise distances, then pull the resulting weights table
retval <- dbCreateTable(con, "hunzikp", "rbmatch", "rpid", "int", drop=TRUE) 
retval <- dbInsertDF(con, "hunzikp", "rbmatch", match.df[,"rpid",drop=FALSE])

weights.df <- dbRunScript(con, "getWeights_v0.1.sql", return=TRUE)
# Sort by rpid so row order lines up with the weight matrices built below
match.df <- match.df[order(match.df$rpid),]


# Build spatial weight objects from the pairwise-distance table. Four
# variants: within-country vs. across-country neighbors, using either all
# pairs or only the top-3 nearest. Each variant yields a row-standardized
# listw object, a neighbours list, a dense weight matrix, and a sparse copy.
# FIX: the across-country top-3 stanza assigned its row names to t3cw.mat
# instead of t3w.mat (copy-paste error); the four stanzas are factored into
# one helper so the naming bug cannot recur.
# NOTE(review): Matrix() needs the Matrix package, which is not explicitly
# loaded above — presumably attached as a dependency of spdep; verify.
buildWeightObjects <- function(wdf, dist.col) {
	# Reshape the (rpid, rpid2, distance) pair list into a dense matrix and
	# derive the spdep weight objects from it
	pair.df <- wdf[, c("rpid", "rpid2", dist.col)]
	wide.df <- dcast(pair.df, rpid ~ rpid2)
	mat <- as.matrix(wide.df[,-1])
	rownames(mat) <- colnames(mat) <- unique(pair.df$rpid)
	lw <- mat2listw(mat, row.names=rownames(mat), style="W")
	nb <- lw$neighbours
	wmat <- nb2mat(nb, zero.policy=TRUE)
	list(list=lw, nb=nb, wmat=wmat, smat=Matrix(wmat, sparse=TRUE))
}

# Within country all weights matrix
cw <- buildWeightObjects(weights.df, "cdist_km")
cw.list <- cw$list
cw.nb <- cw$nb
cw.wmat <- cw$wmat
cw.smat <- cw$smat

# Within country top 3 weights matrix
t3cw <- buildWeightObjects(weights.df, "top3cdist_km")
t3cw.list <- t3cw$list
t3cw.nb <- t3cw$nb
t3cw.wmat <- t3cw$wmat
t3cw.smat <- t3cw$smat

# Across countries top 3 weights matrix
t3w <- buildWeightObjects(weights.df, "top3dist_km")
t3w.list <- t3w$list
t3w.nb <- t3w$nb
t3w.wmat <- t3w$wmat
t3w.smat <- t3w$smat

# Across countries all weights matrix
w <- buildWeightObjects(weights.df, "dist_km")
w.list <- w$list
w.nb <- w$nb
w.wmat <- w$wmat
w.smat <- w$smat


###### SAVE OR LOAD
# Persist the matched sample and the weights table; the commented lines
# reload a previously saved run instead of re-matching (cowid must be
# re-converted to a factor after read.csv)
write.csv(match.df, "Data/B100_NN10_WCSA.csv")
# match.df <- read.csv("Data/B100_RT_WCSA.csv")
# match.df$cowid <- as.factor(match.df$cowid)
# NOTE(review): "Weigths" in the file name below looks like a typo, but the
# commented reload uses the same spelling — renaming would break old runs
write.csv(weights.df, "Data/Weigths_B100_NN10_WCSA.csv")
# weights.df <- read.csv("Data/Weigths_B50_RT_WCSA.csv")


###### ANALYSIS

#### GROUP COUNT

## IID COUNT MODELS
# DV: groupcount09 (group count per buffer, 2009). Effect of petropoint
# estimated with increasingly rich specifications: bivariate Poisson,
# quasi-Poisson (allows overdispersion), quasi-Poisson with covariates,
# and with country fixed effects (cowid)
pois.fit <- glm(groupcount09 ~ petropoint, data=match.df, family="poisson")
summary(pois.fit)
qpois.fit <- glm(groupcount09 ~ petropoint, data=match.df, family="quasipoisson")
summary(qpois.fit)
qpois.cov.fit <- glm(groupcount09 ~ petropoint + lnarea + lnpop + lncap + lnborder + elevsd + lec, data=match.df, family="quasipoisson")
summary(qpois.cov.fit)
qpois.cov.fit <- glm(groupcount09 ~ cowid + petropoint + lnarea + lnpop + lncap + lnborder + elevsd + lec, data=match.df, family="quasipoisson")
summary(qpois.cov.fit)

# DV: Group diff 65-09 (gpdiff). family defaults to gaussian here, so these
# glm() calls fit linear models
lm.fit <- glm(gpdiff ~ petropoint, data=match.df)
summary(lm.fit)
lm.cov.fit <- glm(gpdiff ~ petropoint + lnarea + lnpop + lncap + lnborder + elevsd + ethnologue_count, data=match.df)
summary(lm.cov.fit)
lm.cov.fit <- glm(gpdiff ~ cowid + petropoint + lnarea + lnpop + lncap + lnborder + elevsd + ethnologue_count, data=match.df)
summary(lm.cov.fit)

# NOTE(review): same specification as the covariate quasi-Poisson model
# above but with the default gaussian family — presumably a linear-model
# robustness check; confirm the missing family= is intentional
qpois.cov.fit <- glm(groupcount09 ~ petropoint + lnarea + lnpop + lncap + lnborder + elevsd + lec, data=match.df)
summary(qpois.cov.fit)

# SPATIAL REGRESSION (spatial error models, within-country top-3 weights)
# DV: groupcount09
slm.t3cw.fit <- errorsarlm(groupcount09 ~ petropoint, data=match.df, listw=t3cw.list, zero.policy=TRUE)
summary(slm.t3cw.fit)
slm.t3cw.fit <- errorsarlm(groupcount09 ~ petropoint + lnarea + lnpop + lncap + lnborder + elevsd + lec, data=match.df, listw=t3cw.list, zero.policy=TRUE)
summary(slm.t3cw.fit)
slm.t3cw.fit <- errorsarlm(groupcount09 ~ cowid + petropoint + lnarea + lnpop + lncap + lnborder + elevsd + lec, data=match.df, listw=t3cw.list, zero.policy=TRUE)
summary(slm.t3cw.fit)

# DV: Group diff 65-09
slm.t3cw.fit <- errorsarlm(gpdiff ~ petropoint, data=match.df, listw=t3cw.list, zero.policy=TRUE)
summary(slm.t3cw.fit)
slm.t3cw.fit <- errorsarlm(gpdiff ~ cowid + petropoint + log(ethnologue_count+1) + lnarea + lnpop + lncap + lnborder + elevsd, data=match.df, listw=t3cw.list, zero.policy=TRUE)
summary(slm.t3cw.fit)



#### EXCLUSION

## IID MODELS
# DV: Exclusion 09 (max_excl09, binary) — logit, bivariate and with country
# fixed effects plus covariates
bin.fit <- glm(max_excl09 ~ petropoint, data=match.df, family="binomial")
summary(bin.fit)
bin.cov.fit <- glm(max_excl09 ~ cowid + petropoint + lnarea + lnpop + lncap + lnborder + elevsd + lec, data=match.df, family="binomial")
summary(bin.cov.fit)

# DV: Mean Exclusion (linear models; family defaults to gaussian).
# petropoint and group count interacted with border length
lm.fit <- glm(mean_excl ~ petropoint*lnborder + groupcount09*lnborder, data=match.df)
summary(lm.fit)
lm.cov.fit <- glm(mean_excl ~ petropoint + lnarea + lnpop + lncap + lnborder + elevsd + lec + groupcount09, data=match.df)
summary(lm.cov.fit)
lm.cov.fit <- glm(mean_excl ~ cowid + petropoint*lnborder + lnarea + lnpop + lncap + lnborder + elevsd + lec + groupcount09, data=match.df)
summary(lm.cov.fit)

# DV: Mean No Auton (mean_noauton), same progression of specifications
lm.fit <- glm(mean_noauton ~ petropoint + groupcount09, data=match.df)
summary(lm.fit)
lm.cov.fit <- glm(mean_noauton ~ petropoint + lnarea + lnpop + lncap + lnborder + elevsd + lec + groupcount09, data=match.df)
summary(lm.cov.fit)
lm.cov.fit <- glm(mean_noauton ~ cowid + petropoint*lnborder + lnarea + lnpop + lncap + lnborder + elevsd + lec + groupcount09, data=match.df)
summary(lm.cov.fit)



## SPATIAL REGRESSION
# DV: Exclusion 09 — spatial error probit with across-country top-3 weights.
# NOTE(review): semprobit is not provided by any package loaded at the top
# of this file (mgcv/Hmisc/reshape2/spdep/MASS) — presumably spatialprobit;
# verify it is attached before this runs
slm.t3w.fit <- semprobit(max_excl09 ~ petropoint, data=match.df, W=t3w.smat, ndraw=100, burn.in=10, showProgress=TRUE)
summary(slm.t3w.fit)
slm.t3w.fit <- semprobit(max_excl09 ~ cowid + petropoint + log(ethnologue_count+1) + lnarea + lnpop + lncap + lnborder + elevsd, data=match.df, W=Matrix(t3w.wmat, sparse=TRUE), ndraw=500, burn.in=100, showProgress=TRUE)
summary(slm.t3w.fit)

# DV: Mean Exclusion.
# NOTE(review): the two covariate fits below are stored as slm.t3cw.fit but
# use listw=cw.list (all within-country weights, not top-3) — verify the
# weight choice is intentional
slm.t3cw.fit <- errorsarlm(mean_excl ~ petropoint*lnborder + groupcount09*lnborder, data=match.df, listw=t3cw.list, zero.policy=TRUE)
summary(slm.t3cw.fit)
slm.t3cw.fit <- errorsarlm(mean_excl ~ petropoint*lnborder + lnarea + lnpop + lncap + lnborder + elevsd + lec + groupcount09*lnborder, data=match.df, listw=cw.list, zero.policy=TRUE)
summary(slm.t3cw.fit)
slm.t3cw.fit <- errorsarlm(mean_excl ~ cowid + petropoint*lnborder + lnarea + lnpop + lncap + lnborder + elevsd + lec + lnborder*groupcount09, data=match.df, listw=cw.list, zero.policy=TRUE)
summary(slm.t3cw.fit)

# DV: Mean No Auton (first fit uses cw.list, the others t3cw.list — see
# note above)
slm.t3cw.fit <- errorsarlm(mean_noauton ~ petropoint + groupcount09, data=match.df, listw=cw.list, zero.policy=TRUE)
summary(slm.t3cw.fit)
slm.t3cw.fit <- errorsarlm(mean_noauton ~ petropoint + lnarea + lnpop + lncap + lnborder + elevsd + lec, data=match.df, listw=t3cw.list, zero.policy=TRUE)
summary(slm.t3cw.fit)
slm.t3cw.fit <- errorsarlm(mean_noauton ~ cowid + petropoint*lnborder + lnarea + lnpop + lncap + lnborder + elevsd + lec + groupcount09*lnborder, data=match.df, listw=t3cw.list, zero.policy=TRUE)
summary(slm.t3cw.fit)
