# -*- coding: utf-8 -*-

# Experiment: compare several scikit-learn classifiers using manual 10-fold
# cross-validation, then persist each trained model to disk.

import os

import numpy as np

from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

import load_data

# File names (without extension) used by save_model for each persisted model.
# Order matters: each *_Algorithms function indexes into this list by position.
models_name=["RandomForestClassifier","LogisticRegression","kNN","DecisionTree","MLPClassifier","NaiveBayes","SVM","Bagging"]
    
def all_Algorithms():
    """Train and evaluate every classifier, returning their CV accuracies.

    Returns:
        list[float]: mean 10-fold validation accuracy of each model, in the
        same order as ``models_name``.
    """
    # NOTE(review): load_file is not defined in this module and load_data is
    # imported as a module — presumably this should be load_data.load_file();
    # confirm against load_data's API.
    x_train, x_test, y_train, y_test = load_file()

    # One entry per model, in the same order as models_name.
    trainers = [
        RandomForestClassifier_Algorithms,
        LogisticRegression_Algorithms,
        kNN_Algorithms,
        DecisionTree_Algorithms,
        MLPClassifier_Algorithms,
        NaiveBayes_Algorithms,
        SVM_Algorithms,
        Bagging_Algorithms,
    ]

    models_score = []  # mean CV accuracy per model
    for step, trainer in enumerate(trainers, start=1):
        print(step)  # progress indicator, 1..8
        models_score.append(trainer(x_train, y_train))
    return models_score
    
def RandomForestClassifier_Algorithms(x_train, y_train):
    """Evaluate a random forest with manual 10-fold CV and persist a model.

    Returns:
        float: mean validation accuracy over the 10 folds.
    """
    fold_accuracies = []
    k = 10
    num_val_samples = len(x_train) // k
    for i in range(k):
        # Validation data: the i-th partition.
        val_data = x_train[i * num_val_samples: (i + 1) * num_val_samples]
        val_targets = y_train[i * num_val_samples: (i + 1) * num_val_samples]

        # Training data: all remaining partitions.
        partial_train_data = np.concatenate(
            [x_train[:i * num_val_samples],
             x_train[(i + 1) * num_val_samples:]],
            axis=0)
        partial_train_targets = np.concatenate(
            [y_train[:i * num_val_samples],
             y_train[(i + 1) * num_val_samples:]],
            axis=0)
        rfc = RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0)
        rfc.fit(partial_train_data, partial_train_targets)
        predictions = rfc.predict(val_data)
        fold_accuracies.append(accuracy_score(val_targets, predictions))
    average_accuracy = np.mean(fold_accuracies)
    # Fix: the original saved whichever instance was fit on the LAST fold's
    # subset only; retrain on the full training set before persisting.
    rfc = RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0)
    rfc.fit(x_train, y_train)
    save_model(rfc, models_name[0])
    return average_accuracy

def LogisticRegression_Algorithms(x_train, y_train):
    """Evaluate logistic regression with manual 10-fold CV and persist a model.

    Returns:
        float: mean validation accuracy over the 10 folds.
    """
    fold_accuracies = []
    k = 10
    num_val_samples = len(x_train) // k
    for i in range(k):
        # Validation data: the i-th partition.
        val_data = x_train[i * num_val_samples: (i + 1) * num_val_samples]
        val_targets = y_train[i * num_val_samples: (i + 1) * num_val_samples]

        # Training data: all remaining partitions.
        partial_train_data = np.concatenate(
            [x_train[:i * num_val_samples],
             x_train[(i + 1) * num_val_samples:]],
            axis=0)
        partial_train_targets = np.concatenate(
            [y_train[:i * num_val_samples],
             y_train[(i + 1) * num_val_samples:]],
            axis=0)
        lr = LogisticRegression(random_state=0)
        lr.fit(partial_train_data, partial_train_targets)
        predictions = lr.predict(val_data)
        fold_accuracies.append(accuracy_score(val_targets, predictions))
    average_accuracy = np.mean(fold_accuracies)
    # Fix: the original saved whichever instance was fit on the LAST fold's
    # subset only; retrain on the full training set before persisting.
    lr = LogisticRegression(random_state=0)
    lr.fit(x_train, y_train)
    save_model(lr, models_name[1])
    return average_accuracy

def kNN_Algorithms(train_data, train_targets):
    """Evaluate a 4-NN classifier with manual 10-fold CV and persist a model.

    Returns:
        float: mean validation accuracy over the 10 folds.
    """
    fold_accuracies = []
    k = 10
    num_val_samples = len(train_data) // k
    for i in range(k):
        # Validation data: the i-th partition.
        val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
        val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]

        # Training data: all remaining partitions.
        partial_train_data = np.concatenate(
            [train_data[:i * num_val_samples],
             train_data[(i + 1) * num_val_samples:]],
            axis=0)
        partial_train_targets = np.concatenate(
            [train_targets[:i * num_val_samples],
             train_targets[(i + 1) * num_val_samples:]],
            axis=0)
        knn = KNeighborsClassifier(n_neighbors=4)
        knn.fit(partial_train_data, partial_train_targets)
        predictions = knn.predict(val_data)
        fold_accuracies.append(accuracy_score(val_targets, predictions))
    average_accuracy = np.mean(fold_accuracies)
    # Fix: the original saved whichever instance was fit on the LAST fold's
    # subset only; retrain on the full training set before persisting.
    knn = KNeighborsClassifier(n_neighbors=4)
    knn.fit(train_data, train_targets)
    save_model(knn, models_name[2])
    return average_accuracy

def DecisionTree_Algorithms(x_train, y_train):
    """Evaluate a decision tree with manual 10-fold CV and persist a model.

    Returns:
        float: mean validation accuracy over the 10 folds.
    """
    fold_accuracies = []
    k = 10
    num_val_samples = len(x_train) // k
    for i in range(k):
        # Validation data: the i-th partition.
        val_data = x_train[i * num_val_samples: (i + 1) * num_val_samples]
        val_targets = y_train[i * num_val_samples: (i + 1) * num_val_samples]

        # Training data: all remaining partitions.
        partial_train_data = np.concatenate(
            [x_train[:i * num_val_samples],
             x_train[(i + 1) * num_val_samples:]],
            axis=0)
        partial_train_targets = np.concatenate(
            [y_train[:i * num_val_samples],
             y_train[(i + 1) * num_val_samples:]],
            axis=0)
        dtc = DecisionTreeClassifier()
        dtc.fit(partial_train_data, partial_train_targets)
        predictions = dtc.predict(val_data)
        fold_accuracies.append(accuracy_score(val_targets, predictions))
    average_accuracy = np.mean(fold_accuracies)
    # Fix: the original saved whichever instance was fit on the LAST fold's
    # subset only; retrain on the full training set before persisting.
    dtc = DecisionTreeClassifier()
    dtc.fit(x_train, y_train)
    save_model(dtc, models_name[3])
    return average_accuracy
    
def MLPClassifier_Algorithms(x_train, y_train):
    """Evaluate an MLP classifier with manual 10-fold CV and persist a model.

    Returns:
        float: mean validation accuracy over the 10 folds.
    """
    seed = 7  # fixed random_state so results are reproducible
    fold_accuracies = []
    k = 10
    num_val_samples = len(x_train) // k
    for i in range(k):
        # Validation data: the i-th partition.
        val_data = x_train[i * num_val_samples: (i + 1) * num_val_samples]
        val_targets = y_train[i * num_val_samples: (i + 1) * num_val_samples]

        # Training data: all remaining partitions.
        partial_train_data = np.concatenate(
            [x_train[:i * num_val_samples],
             x_train[(i + 1) * num_val_samples:]],
            axis=0)
        partial_train_targets = np.concatenate(
            [y_train[:i * num_val_samples],
             y_train[(i + 1) * num_val_samples:]],
            axis=0)
        mlp = MLPClassifier(random_state=seed, solver='lbfgs')
        mlp.fit(partial_train_data, partial_train_targets)
        predictions = mlp.predict(val_data)
        fold_accuracies.append(accuracy_score(val_targets, predictions))
    average_accuracy = np.mean(fold_accuracies)
    # Fix: the original saved whichever instance was fit on the LAST fold's
    # subset only; retrain on the full training set before persisting.
    mlp = MLPClassifier(random_state=seed, solver='lbfgs')
    mlp.fit(x_train, y_train)
    save_model(mlp, models_name[4])
    return average_accuracy
     
def NaiveBayes_Algorithms(x_train, y_train):
    """Evaluate Gaussian naive Bayes with manual 10-fold CV and persist a model.

    Returns:
        float: mean validation accuracy over the 10 folds.
    """
    fold_accuracies = []
    k = 10
    num_val_samples = len(x_train) // k
    for i in range(k):
        # Validation data: the i-th partition.
        val_data = x_train[i * num_val_samples: (i + 1) * num_val_samples]
        val_targets = y_train[i * num_val_samples: (i + 1) * num_val_samples]

        # Training data: all remaining partitions.
        partial_train_data = np.concatenate(
            [x_train[:i * num_val_samples],
             x_train[(i + 1) * num_val_samples:]],
            axis=0)
        partial_train_targets = np.concatenate(
            [y_train[:i * num_val_samples],
             y_train[(i + 1) * num_val_samples:]],
            axis=0)
        nb = GaussianNB()
        nb.fit(partial_train_data, partial_train_targets)
        predictions = nb.predict(val_data)
        fold_accuracies.append(accuracy_score(val_targets, predictions))
    average_accuracy = np.mean(fold_accuracies)
    # Fix: the original saved whichever instance was fit on the LAST fold's
    # subset only; retrain on the full training set before persisting.
    nb = GaussianNB()
    nb.fit(x_train, y_train)
    save_model(nb, models_name[5])
    return average_accuracy

def SVM_Algorithms(x_train, y_train):
    """Evaluate a polynomial-kernel SVM with manual 10-fold CV and persist a model.

    Returns:
        float: mean validation accuracy over the 10 folds.
    """
    fold_accuracies = []
    k = 10
    num_val_samples = len(x_train) // k
    for i in range(k):
        # Validation data: the i-th partition.
        val_data = x_train[i * num_val_samples: (i + 1) * num_val_samples]
        val_targets = y_train[i * num_val_samples: (i + 1) * num_val_samples]

        # Training data: all remaining partitions.
        partial_train_data = np.concatenate(
            [x_train[:i * num_val_samples],
             x_train[(i + 1) * num_val_samples:]],
            axis=0)
        partial_train_targets = np.concatenate(
            [y_train[:i * num_val_samples],
             y_train[(i + 1) * num_val_samples:]],
            axis=0)
        svc = SVC(kernel='poly', gamma="auto")
        svc.fit(partial_train_data, partial_train_targets)
        predictions = svc.predict(val_data)
        fold_accuracies.append(accuracy_score(val_targets, predictions))
    average_accuracy = np.mean(fold_accuracies)
    # Fix: the original saved whichever instance was fit on the LAST fold's
    # subset only; retrain on the full training set before persisting.
    svc = SVC(kernel='poly', gamma="auto")
    svc.fit(x_train, y_train)
    save_model(svc, models_name[6])
    return average_accuracy

def Bagging_Algorithms(x_train, y_train):
    """Evaluate a bagged decision-tree ensemble with manual 10-fold CV and persist a model.

    Returns:
        float: mean validation accuracy over the 10 folds.
    """
    fold_accuracies = []
    k = 10
    num_val_samples = len(x_train) // k
    for i in range(k):
        # Validation data: the i-th partition.
        val_data = x_train[i * num_val_samples: (i + 1) * num_val_samples]
        val_targets = y_train[i * num_val_samples: (i + 1) * num_val_samples]

        # Training data: all remaining partitions.
        partial_train_data = np.concatenate(
            [x_train[:i * num_val_samples],
             x_train[(i + 1) * num_val_samples:]],
            axis=0)
        partial_train_targets = np.concatenate(
            [y_train[:i * num_val_samples],
             y_train[(i + 1) * num_val_samples:]],
            axis=0)
        # NOTE(review): `base_estimator` was renamed `estimator` in
        # scikit-learn 1.2 and removed in 1.4 — confirm installed version.
        clfb = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
                                 max_samples=0.5, max_features=0.5)
        clfb.fit(partial_train_data, partial_train_targets)
        predictions = clfb.predict(val_data)
        fold_accuracies.append(accuracy_score(val_targets, predictions))
    average_accuracy = np.mean(fold_accuracies)
    # Fix: the original saved whichever instance was fit on the LAST fold's
    # subset only; retrain on the full training set before persisting.
    clfb = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
                             max_samples=0.5, max_features=0.5)
    clfb.fit(x_train, y_train)
    save_model(clfb, models_name[7])
    return average_accuracy

# Persist a trained model to disk.
def save_model(model_temp, model_name):
    """Serialize ``model_temp`` to ../testModel/<model_name>.model.

    Creates the output directory if it does not exist.
    """
    # NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
    # the module-level import should become `import joblib` — confirm version.
    dirs = "../testModel"
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists + os.makedirs pair.
    os.makedirs(dirs, exist_ok=True)
    joblib.dump(model_temp, os.path.join(dirs, model_name + ".model"))


if __name__ == "__main__":
    # Guarded so importing this module does not launch every experiment.
    print(all_Algorithms())