# Load required libraries
library(hydroGOF)
library(clusterSim)
library(sqldf)


# --- Configuration -----------------------------------------------------------
# Folder scanned (recursively) for merged per-run CSVs, and where results go.
inputPath <- "C:/in2"
outputPath <- "C:/out2/"
# Filename suffix identifying the merged data files.
inputFile <- "_merge.csv"
# Accumulator: one row of evaluation metrics per (file, test year).
model_result <- data.frame()

# Find all "*_merge.csv" files. The dot is escaped and the pattern anchored to
# the end of the name so "." is not treated as a regex wildcard — the original
# pattern would also match names like "x_mergeXcsv_old".
listFile <- list.files(path = inputPath, pattern = "_merge\\.csv$",
                       full.names = TRUE, recursive = TRUE)
Size <- length(listFile)

# --- Model evaluation loop ---------------------------------------------------
# For each merged CSV: drop station 3, remove influential points via Cook's
# distance, then run leave-one-year-out cross-validation (2012-2014) on a
# linear PM2.5 model and record R^2, RMSE, relative error, MFB and MFE.
result_rows <- list()

for (i in seq_len(Size)) {  # seq_len() is safe when no files were found (Size == 0)

	filename <- listFile[i]

	# Extract the run tag sitting between "_<digit>" and "_merge.csv" in the
	# file name. fixed = TRUE stops "." being treated as a regex wildcard.
	start_index <- regexpr("_[1-9]", filename) + 1
	end_index <- regexpr("_merge.csv", filename, fixed = TRUE) - 1
	rt <- substr(filename, start_index, end_index)

	data <- read.csv(filename)
	# Station 3 is excluded from the analysis (reason not documented here).
	data <- subset(data, data$station != 3)

	# Flag influential observations using Cook's distance on the full model;
	# the conventional cutoff 4 / (n - k - 1) is applied.
	cook_model <- lm(pm25 ~ aod + temp + avg_temp + avg_rh + avg_preci_24, data)
	data["cookValue"] <- cooks.distance(cook_model)
	nSample <- nrow(data)
	nPredict <- length(cook_model$coefficients) - 1
	cutoff <- 4 / (nSample - nPredict - 1)

	data2 <- subset(data, data$cookValue <= cutoff)

	# Leave-one-year-out cross-validation over the three study years.
	for (testyear in 2012:2014) {
		trainData2 <- subset(data2, data2$year != testyear)
		testData2 <- subset(data2, data2$year == testyear)

		model2 <- lm(pm25 ~ aod + temp + avg_temp + avg_rh + avg_preci_24,
		             trainData2)
		pm25model <- predict(model2, testData2)

		np_train_samples <- nrow(trainData2)
		np_test_samples <- nrow(testData2)

		# Mean relative error (%). NOTE(review): divides by observed pm25 —
		# assumes no zero observations in the test set; confirm upstream.
		model_re <- sum(abs(testData2$pm25 - pm25model) / testData2$pm25) /
			np_test_samples * 100
		model_rmse <- rmse(testData2$pm25, pm25model)  # hydroGOF::rmse
		model_r <- cor(testData2$pm25, pm25model)
		# Mean fractional bias / error (%), standard definitions.
		model_mfb <- (2 * sum((pm25model - testData2$pm25) /
			(testData2$pm25 + pm25model) * 100)) / np_test_samples
		model_mfe <- (2 * sum(abs(pm25model - testData2$pm25) /
			(pm25model + testData2$pm25) * 100)) / np_test_samples

		# Collect rows in a list and bind once after the loop: rbind() inside
		# a loop copies the whole frame each iteration (quadratic cost).
		result_rows[[length(result_rows) + 1]] <- data.frame(
			rt, testyear, np_train_samples, np_test_samples,
			np_r2   = round(model_r * model_r, 3),
			np_rmse = round(model_rmse, 3),
			np_re   = round(model_re, 3),
			np_mfb  = round(model_mfb, 3),
			np_mfe  = round(model_mfe, 3)
		)
	}

}
# Single bind; do.call(rbind, list()) is NULL, so an empty run leaves
# model_result as the empty data.frame it started as.
model_result <- rbind(model_result, do.call(rbind, result_rows))
# Write the aggregated metrics to the configured output folder. The original
# hard-coded "C:/out2/" here, silently ignoring the outputPath variable defined
# at the top of the script — using the variable keeps the two in sync.
# NOTE(review): write.csv still emits row names (a leading index column) as in
# the original; add row.names = FALSE if no downstream consumer relies on it.
write.csv(model_result, paste0(outputPath, "npp_year_model_rs.csv"))
print("Done")


