# -*- coding: utf-8 -*-
"""
Created on Mon Aug 16 16:17:49 2021

@author: Xiaobin Li
"""
import os
import math
import numpy as np
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn import neighbors
from sklearn import svm
from sklearn import tree
from sklearn import ensemble
from sklearn.metrics import mean_squared_error
import csv
from gplearn import genetic
from gplearn.genetic import SymbolicRegressor
from datetime import datetime

    
def onetime(modelname, nfolds, filename="data1-8.csv"):
    """Run one round of nfolds-fold cross-validation for one regressor.

    Four feature sets are evaluated (2 unit tests; 8 questions;
    8 questions + auxiliary info; GP-transformed features) against two
    targets: Pfinal (result lists L1-L4) and Final (result lists L5-L8).

    Parameters
    ----------
    modelname : str
        One of 'LR', 'Ridge', 'Lasso', 'SVR', 'DT', 'KNN', 'RandomForest'.
    nfolds : int
        Number of cross-validation folds.
    filename : str, optional
        Input CSV; expected column layout (after a header row):
        [?, Final, Pfinal, unit-test-1, unit-test-2, q1..q8, auxiliary...].

    Returns
    -------
    tuple of 8 lists
        (L1..L8), each of length nfolds, holding per-fold RMSE values.

    Raises
    ------
    ValueError
        If `modelname` is not one of the supported names.
    """
    # --- load the data (with closes the handle; the original leaked it) ---
    y = []                       # Final target
    yp = []                      # Pfinal target
    xunit = []                   # 2 unit tests
    xquestion = []               # 8 questions
    xdetail = []                 # auxiliary info for the 8 questions
    with open(filename, newline='') as fh:
        reader = csv.reader(fh)
        next(reader, None)       # skip the header row
        for row in reader:
            y.append(row[1])
            yp.append(row[2])
            xunit.append(row[3:5])
            xquestion.append(row[5:13])
            xdetail.append(row[13:])

    ya = np.array(y, dtype=float)
    ypa = np.array(yp, dtype=float)
    xunita = np.array(xunit, dtype=float)
    xquestiona = np.array(xquestion, dtype=float)
    xdetaila = np.array(xdetail, dtype=float)

    kf = KFold(n_splits=nfolds, shuffle=False)

    # --- pick the estimator; fail fast on an unknown name -----------------
    factories = {
        'LR': LinearRegression,
        'Ridge': lambda: Ridge(alpha=.5),
        'Lasso': lambda: Lasso(alpha=.5),
        'SVR': svm.SVR,
        'DT': tree.DecisionTreeRegressor,
        'KNN': neighbors.KNeighborsRegressor,
        'RandomForest': lambda: ensemble.RandomForestRegressor(n_estimators=20),
    }
    if modelname not in factories:
        raise ValueError("unknown modelname: %r" % (modelname,))
    # A single estimator is enough: it is refit from scratch for every
    # fold/target.  (The original's `model1 = model` was an alias of the
    # same object anyway, not a second model.)
    model = factories[modelname]()

    def _cv_rmse(X, target):
        # Per-fold RMSE of `model`, refit on each training split.
        scores = []
        for train_idx, test_idx in kf.split(target):
            model.fit(X[train_idx], target[train_idx])
            pred = model.predict(X[test_idx])
            scores.append(math.sqrt(mean_squared_error(target[test_idx], pred)))
        return scores

    L1 = _cv_rmse(xunita, ypa)       # Pfinal from 2 unit tests
    L5 = _cv_rmse(xunita, ya)        # Final  from 2 unit tests
    L2 = _cv_rmse(xquestiona, ypa)   # Pfinal from 8 questions
    L6 = _cv_rmse(xquestiona, ya)    # Final  from 8 questions

    xall = np.hstack((xquestiona, xdetaila))
    L3 = _cv_rmse(xall, ypa)         # Pfinal from questions + auxiliary info
    L7 = _cv_rmse(xall, ya)          # Final  from questions + auxiliary info

    # --- GP-transformed features ------------------------------------------
    function_set = ['add', 'sub', 'mul', 'div',
                    'sqrt', 'log', 'abs', 'neg', 'inv',
                    'max', 'min']
    L4, L8 = [], []
    for train_idx, test_idx in kf.split(ya):
        gp = genetic.SymbolicTransformer(generations=20, population_size=2000,
                                         hall_of_fame=100, n_components=10,
                                         function_set=function_set,
                                         parsimony_coefficient=0.0005,
                                         max_samples=0.9, verbose=1,
                                         random_state=0, n_jobs=3)
        # Fit the transformer on the training split only, then derive the GP
        # features from the ORIGINAL feature matrix each fold.  The original
        # code did `xtmp = gp.transform(xtmp)`, so every fold after the first
        # transformed already-transformed data.
        gp.fit(xall[train_idx], ypa[train_idx])
        xgp = gp.transform(xall)

        model.fit(xgp[train_idx], ypa[train_idx])
        pred = model.predict(xgp[test_idx])
        L4.append(math.sqrt(mean_squared_error(ypa[test_idx], pred)))

        model.fit(xgp[train_idx], ya[train_idx])
        pred = model.predict(xgp[test_idx])
        L8.append(math.sqrt(mean_squared_error(ya[test_idx], pred)))

    return L1, L2, L3, L4, L5, L6, L7, L8

###### user can change these parameters for different algorithm ##################
modelname = 'RandomForest'       # model name: LR, Ridge, Lasso, SVR, DT, KNN, RandomForest
ntimes = 20                      # number of repeated runs
nfolds = 5                       # cross-validation folds
##################################################################################
header = ['data1', 'data2', 'data3', 'data4', 'data5', 'data6', 'data7', 'data8']
# `with` guarantees the output file is closed even if onetime() raises.
with open(modelname + '.csv', 'w', newline='') as file:
    f_csv = csv.writer(file)
    f_csv.writerow(header)
    for _ in range(ntimes):
        L = onetime(modelname, nfolds)
        # One output row per fold: the 8 per-dataset RMSEs for that fold.
        for pos in range(nfolds):
            f_csv.writerow([Litem[pos] for Litem in L])