import os
import pickle
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split

class train_model():
    """Train classifiers on pickled feature data stored under ./data.

    Only the KNN path is implemented; ``random_forest`` and ``DB`` are
    placeholders.  NOTE(review): class name kept lowercase for backward
    compatibility with existing callers (PEP 8 would prefer TrainModel).
    """

    def exit_model(self):
        """Return True iff both training-data files exist on disk.

        NOTE(review): the name reads like a typo of "exists_model", and
        "lablae" like a typo of "label" — both kept as-is because callers
        and the file's producer depend on the exact spellings.
        """
        # Bug fix: os.exists is not a real API; the correct call is
        # os.path.exists (the original raised AttributeError).
        return (os.path.exists("./data/train_data")
                and os.path.exists("./data/lablae"))

    def KNN(self, z):
        """Sweep k for KNeighborsClassifier and persist the best model.

        Parameters
        ----------
        z : mutable sequence
            z[0] is used as a progress counter: reset to 0, then
            incremented once per k tried (so a caller can poll progress).

        Returns
        -------
        float
            The maximum test accuracy observed over all k.
            (Raises ValueError via np.max if the sweep loop never runs,
            i.e. int(h * 0.75) <= 1 — same as the original behavior.)
        """
        # if not self.exit_model():
        #     return False
        with open("./data/train_data", 'rb') as f:
            # Assumes a pickled pandas DataFrame whose LAST column (integer
            # column label w-1) holds the class labels — TODO confirm with
            # whatever writes ./data/train_data.
            x = pickle.load(f)
        h, w = np.shape(x)
        print(h, w)
        y = x[w - 1]                  # label column
        X = x.drop([w - 1], axis=1)   # feature columns
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
        print("####开始训练######")
        acc_list = []
        z[0] = 0
        best_knn = None
        best_acc = -1.0
        # k ranges 1 .. int(h*0.75)-1, i.e. never exceeds the training-set size.
        for k in range(1, int(h * 0.75)):
            knn = KNeighborsClassifier(n_neighbors=k)
            knn.fit(X_train, y_train.astype('int'))
            acc = knn.score(X_test, y_test.astype('int'))  # accuracy for this k
            acc_list.append(acc)
            if acc > best_acc:
                # Bug fix: remember the BEST-scoring model.  The original
                # pickled whichever knn the loop ended on (the largest k),
                # contradicting the "knn_max" filename and the returned
                # max accuracy.
                best_acc = acc
                best_knn = knn
            z[0] = z[0] + 1
        # Robustness: the output directory may not exist yet.
        # ("modle" is presumably a typo of "model" — path kept as-is because
        # the loader elsewhere must use the same spelling.)
        os.makedirs("./modle", exist_ok=True)
        with open("./modle/knn_max", 'wb') as f:
            pickle.dump(best_knn, f)
        with open("./modle/knn_data", 'wb') as f:
            pickle.dump(X_train, f)
        return np.max(acc_list)  # best accuracy over the whole sweep

    def random_forest(self):
        # Placeholder: not implemented yet.
        pass

    def DB(self):
        # Placeholder: not implemented yet.
        pass