from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from sklearn import tree
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, Conv1D, Flatten, MaxPool2D
import matplotlib.pyplot as plt
import numpy as np


# Random forest
def random_forest(original_data, original_X, original_Y, combined_training_data, x_train, x_test, y_train, y_test):
    """Fit and return a RandomForestClassifier on the training split.

    Only x_train / y_train are used; the remaining parameters exist for
    signature compatibility with the other model builders in this file.
    """
    # n_estimators: number of trees; max_depth: cap on tree depth;
    # random_state: fixed seed so the fitted forest is reproducible.
    forest = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=0)
    # ravel() flattens the label frame into the 1-D array sklearn expects.
    forest.fit(x_train, y_train.values.ravel())
    return forest


# Support vector machine
def svm(original_data, original_X, original_Y, combined_training_data, x_train, x_test, y_train, y_test):
    """Fit and return an SVC (default RBF kernel) on the training split.

    Only x_train / y_train are used; the remaining parameters exist for
    signature compatibility with the other model builders in this file.
    """
    # gamma='auto' -> 1 / n_features (the historical sklearn default).
    machine = SVC(gamma='auto')
    machine.fit(x_train, y_train.values.ravel())
    return machine


# Decision tree model
def decision_tree(original_data, original_X, original_Y, combined_training_data, x_train, x_test, x_val, y_train, y_test,y_val):
    """Fit and return a depth-limited DecisionTreeClassifier.

    Only x_train / y_train are used; the remaining parameters exist for
    signature compatibility with the other model builders in this file.
    """
    dt = tree.DecisionTreeClassifier(max_depth=5)  # cap depth to limit overfitting
    dt.fit(x_train, y_train.values.ravel())
    return dt


# Naive Bayes
def nb(original_data, original_X, original_Y, combined_training_data, x_train, x_test, y_train, y_test):
    """Fit and return a MultinomialNB wrapped in a MinMax-scaling pipeline.

    MultinomialNB rejects negative feature values, so features are first
    mapped into [0, 1] with MinMaxScaler.  A Pipeline is used (instead of
    a scaler fitted on x_train and then discarded) so that the exact same
    scaling is applied automatically whenever the returned model predicts
    on new data; previously the fitted scaler was lost, so predictions on
    unscaled test data were inconsistent with how the model was trained.

    The returned object supports fit/predict/score like the bare
    classifier the callers previously received.
    """
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import MinMaxScaler

    clf = make_pipeline(MinMaxScaler(), MultinomialNB())
    clf.fit(x_train, y_train.values.ravel())
    return clf


# KNN
def knn(original_data, original_X, original_Y,combined_training_data,x_train1,x_train2,x_train,x_test,x_val,y_train1,y_train2,y_train,y_test,y_val):
    """Fit and return a 4-nearest-neighbours classifier.

    Only x_train / y_train are used; the remaining parameters exist for
    signature compatibility with the other model builders in this file.
    """
    neighbours = KNeighborsClassifier(n_neighbors=4)
    neighbours.fit(x_train, y_train.values.ravel())
    return neighbours

# Adaboost
def ada(original_data, original_X, original_Y,combined_training_data,x_train1,x_train2,x_train,x_test,x_val,y_train1,y_train2,y_train,y_test,y_val):
    """Fit and return an AdaBoostClassifier on the training split.

    Only x_train / y_train are used; the remaining parameters exist for
    signature compatibility with the other model builders in this file.

    Bug fix: the fitted classifier was previously never returned, so
    callers received None — unlike every sibling builder in this file.
    """
    clf = AdaBoostClassifier()
    clf.fit(x_train, y_train.values.ravel())
    return clf

# Fully-connected neural network
def NN():
    """Build and compile a small fully-connected Keras network.

    Architecture: three hidden relu layers (15 -> 8 -> 5 units) and a
    2-unit softmax output.  No input_dim is given, so Keras infers the
    input dimension from the first batch passed to fit/predict.

    Bug fix: the output layer previously used a sigmoid activation with
    the 'sparse_categorical_crossentropy' loss.  That loss expects a
    probability distribution over the classes, which sigmoid does not
    produce (its per-unit outputs do not sum to 1); softmax is the
    matching activation for a 2-class categorical output.

    Returns the compiled, untrained model; fitting is the caller's job.
    """
    classifier = Sequential()

    # Hidden layers.  (The old Keras API spelled these keywords
    # output_dim/init; current Keras uses units/kernel_initializer.)
    classifier.add(Dense(units=15, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dense(units=8, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dense(units=5, kernel_initializer='uniform', activation='relu'))

    # Output layer: softmax to match sparse_categorical_crossentropy.
    classifier.add(Dense(units=2, kernel_initializer='uniform', activation='softmax'))

    classifier.compile(optimizer='adam',
                       loss='sparse_categorical_crossentropy',
                       metrics=['accuracy'])

    return classifier


def cnn():
    """Build and compile a small 2-D convolutional Keras network.

    Three Conv2D layers (32 -> 16 -> 8 filters, 4x4 kernels, relu) feed
    a flattened 8-unit relu layer and a single sigmoid output, compiled
    with adam / binary_crossentropy.  The input shape is inferred from
    the first data batch.  Returns the compiled, untrained model.
    """
    from keras.models import Sequential
    from keras.layers import Dense, Conv2D, Flatten

    network = Sequential()

    # Convolutional feature extractor: progressively fewer filters.
    for n_filters in (32, 16, 8):
        network.add(Conv2D(n_filters, kernel_size=4, activation='relu'))

    # Classifier head.
    network.add(Flatten())
    network.add(Dense(8, activation='relu'))
    network.add(Dense(1, activation='sigmoid'))

    # Track accuracy while optimising the binary cross-entropy loss.
    network.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

    return network

# print(preprocess.my_sdp_preprocessor('Data/data/pc2.csv'))
# my_sdp_preprocessor returns a tuple; prefix it with * to unpack the
# elements into the builder's positional parameters in order.
# NN_clf = NN(*preprocess.my_sdp_preprocessor('Data/data/pc2.csv'))
# rf_clf = random_forest(*preprocess.my_sdp_preprocessor('Data/data/pc2.csv'))
# svm_clf = svm(*preprocess.my_sdp_preprocessor('Data/data/pc2.csv'))
# cnn_clf = cnn(*preprocess.my_sdp_preprocessor('Data/data/pc2.csv'))

# Local Python version: 3.9
# Local TensorFlow version: 2.6.0
# The installed Keras version must not be newer than the TensorFlow version