import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
from mpl_toolkits.mplot3d import Axes3D

def errCounter(res, standard):
    """Return the misclassification rate between predictions and ground truth.

    Parameters
    ----------
    res : array-like
        Predicted labels (1-D).
    standard : array-like
        True labels, same length as ``res``.

    Returns
    -------
    float
        Fraction of positions where ``res`` and ``standard`` disagree,
        in [0, 1]. Like the original, raises ZeroDivisionError on
        zero-length input.
    """
    res = np.asarray(res)
    standard = np.asarray(standard)
    # Vectorized elementwise comparison replaces the original Python-level
    # loop over range(res.shape[0]) — same result, one C-speed pass.
    return np.count_nonzero(res != standard) / res.shape[0]

from basic import simplePCA,file2fullMatrix,autoNorm
from sklearn.model_selection import train_test_split
# from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier    # k-nearest neighbors
from sklearn.linear_model import LogisticRegression   # logistic regression
from sklearn.svm import SVC                         # support vector machine
from sklearn.tree import DecisionTreeClassifier     # decision tree
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier  # random forest / AdaBoost
from sklearn.naive_bayes import GaussianNB          # naive Bayes
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis   # quadratic discriminant analysis
# from sklearn.neural_network import BernoulliRBM
# from sklearn.gaussian_process import GaussianProcess



# Name/estimator pairs compared in the experiment below. The SVC variants
# were disabled in the original experiment and are intentionally omitted.
_model_specs = [
    ("Knn", KNeighborsClassifier(200)),
    ("LogisticRegression", LogisticRegression()),
    ("Decision_tree", DecisionTreeClassifier(max_depth=5)),
    ("RandomForest", RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1)),
    ("AdaBoost", AdaBoostClassifier()),
    ("NaiveBayes", GaussianNB()),
    ("QDA", QuadraticDiscriminantAnalysis()),
]

# Keep the two parallel module-level lists the rest of the script expects.
names = [spec[0] for spec in _model_specs]
classifiers = [spec[1] for spec in _model_specs]

filename = "pd_speech_features.csv"

# Fraction of samples held out for testing, and PCA target dimensions.
test_sizes = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3]
dimensions = [600, 500, 400, 300, 200, 100]

# The z values collected below are produced with `dimensions` as the OUTER
# loop and `test_sizes` as the INNER loop, so for point k:
#   x[k] must be test_sizes[k % len(test_sizes)]  (sizes tiled), and
#   y[k] must be dimensions[k // len(test_sizes)] (each dim repeated).
# BUG FIX: the original sorted x (repeating each size 6x) while tiling y,
# which paired most z values with the wrong (x, y) coordinates. Also use
# len(...) instead of the hardcoded 6.
x = test_sizes * len(dimensions)
y = [dimen for dimen in dimensions for _ in test_sizes]

# Load the full feature matrix and normalize each feature column.
feature, target, feature_name = file2fullMatrix(filename)
feature = autoNorm(feature)


fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_title("Appearence Of Different Classifiers")

# One 3-D curve per classifier: error rate (z) across every
# (PCA dimension, test size) combination, outer loop over dimensions.
for name, clf in zip(names, classifiers):
    z = []

    for dimen in dimensions:
        # Reduce the normalized features to `dimen` components once per
        # dimension, then sweep the train/test split sizes.
        reduced = simplePCA(feature, dimen)

        for size in test_sizes:
            x_train, x_test, y_train, y_test = train_test_split(reduced, target, test_size=size)

            clf.fit(x_train, y_train)
            predictions = clf.predict(x_test)

            z.append(errCounter(predictions, y_test))

    ax.plot(x, y, z, label=name, alpha=0.4)

plt.legend()
plt.tight_layout()
plt.show()







