import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt

from sklearn.svm import LinearSVC
from sklearn import preprocessing
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split, cross_val_score

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader

import math

from tqdm import tqdm

import itertools

from static.datasets_LFDS import MyDataset
from network import NerualNet_yieldRate
import autoEncoder

from loss import weighted_mse_loss, weighted_huber_loss

# Number of label categories (used only by the commented-out kmeans+KNN variant below).
n_category=3

# Hyper-parameters for label-distribution smoothing (LDS) and feature-distribution
# smoothing (FDS), forwarded to MyDataset and NerualNet_yieldRate inside train().
# reweight options: ['none', 'sqrt_inv', 'inverse']
# kernel options:   ['gaussian', 'triang', 'laplace']
reweight='inverse'  # sample re-weighting scheme passed to MyDataset
lds=True  # enable label-distribution smoothing in MyDataset
lds_kernel='gaussian'  # LDS smoothing kernel
lds_ks=3  # LDS kernel size
lds_sigma=0.5  # LDS kernel sigma
fds=False  # enable feature-distribution smoothing inside the model
bucket_num=10  # FDS label-bucket count -- semantics defined in network.py; verify there
bucket_start=3  # first FDS bucket -- presumably where smoothing begins; verify in network.py
start_update=0  # epoch from which FDS running stats are updated (see train() FDS pass)
start_smooth=1  # epoch from which FDS smoothing is applied -- presumably; verify in network.py
fds_kernel='gaussian'  # FDS smoothing kernel
fds_ks=3  # FDS kernel size
fds_sigma=0.5  # FDS kernel sigma


def r2_loss(output, target, weights=None):
    """Mean weighted relative-error loss.

    For each element, computes |target - output| / (|target - mean(target)| * w)
    and returns the mean.  NOTE: despite the name this is NOT the standard
    coefficient-of-determination (R^2) formula -- it is a per-sample ratio of
    residual to weighted deviation-from-mean.

    Args:
        output: predicted values (tensor).
        target: ground-truth values (tensor, same shape as output).
        weights: optional per-sample weights; ``None`` means uniform weights
            (previously this raised a TypeError because ``None`` was
            multiplied directly).

    Returns:
        Scalar tensor with the mean ratio.  NOTE(review): elements where
        target equals its mean make the denominator zero and yield inf/nan;
        callers should be aware (unchanged from the original behavior).
    """
    target_mean = torch.mean(target)
    if weights is None:
        # Uniform weighting when no per-sample weights are supplied.
        weights = torch.ones_like(target)
    ss_tot = torch.abs(target - target_mean) * weights
    ss_res = torch.abs(target - output)
    r2 = torch.div(ss_res, ss_tot)
    return torch.mean(r2)

def self_loss(output, target, weights=None):
    """Incomplete experimental loss stub -- currently returns None.

    NOTE(review): this function is unfinished and effectively dead code: its
    only call site in train() is commented out, `gap` is computed but never
    used, the early return produces no loss value, and `output >= 1` on a
    multi-element tensor would raise a RuntimeError (ambiguous truth value).
    Keep or finish deliberately; do not call as-is.
    """
    gap=output>1
    if output>=1:
        return

def train(dataPath, modelSavePath):
    """Train NerualNet_yieldRate on the CSV at `dataPath` and save the model.

    The CSV is expected to have feature columns followed by a single target
    column in the last position.  Features are standardized, the data is
    split 60/20/20 into train/valid/test, the network is trained for 3000
    epochs with a weighted MSE loss (LDS sample weights from MyDataset,
    optional FDS feature smoothing), training curves are plotted, and the
    model is evaluated on the held-out test set.

    Args:
        dataPath: path to the input CSV file.
        modelSavePath: path where the trained model is saved via torch.save().
    """
    print('\ntraining: ', dataPath)

    dataset = pd.read_csv(dataPath)
    X = dataset.iloc[:, :-1]
    # y=dataset['L']
    y = dataset.iloc[:, -1]
    # normalize
    # NOTE(review): the scaler is fit on ALL rows before the train/test split,
    # so test statistics leak into training normalization -- consider fitting
    # on the training split only.
    X_scaled = preprocessing.StandardScaler().fit_transform(X)
    # X_scaled = preprocessing.MinMaxScaler().fit_transform(X)
    # X_scaled = preprocessing.MaxAbsScaler().fit_transform(X)
    # X_scaled = preprocessing.Normalizer().fit_transform(X)


    # y_a=y.values.reshape(-1,1)
    # y = preprocessing.Normalizer().fit_transform(y)
    # y = preprocessing.MinMaxScaler().fit_transform(y.values.reshape(-1,1))
    # Splitting the dataset into the Training set and Test set
    # 20% test, then 2/8 of the remaining 80% as validation -> 60/20/20 overall.
    X_train, X_test, y_train, y_test = train_test_split(X_scaled, y.values, test_size=0.2, random_state=3)
    # X_train, X_test, y_train, y_test = train_test_split(X.values, y, test_size=0.2, random_state=3)
    X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=2/8, random_state=0)

    print('\ntrain image shape : ', X_train.shape)
    print('train label shape : ', y_train.shape)
    print('valid image shape : ', X_valid.shape)
    print('valid label image : ', y_valid.shape)
    print('test image shape  : ', X_test.shape)
    print('test label image : ', y_test.shape)

    # Only the training set gets re-weighting / LDS; valid and test use
    # MyDataset defaults (weights are still yielded but ignored below).
    train_dataset = MyDataset(X=X_train, y=y_train, reweight=reweight, lds=lds, lds_kernel=lds_kernel, lds_ks=lds_ks, lds_sigma=lds_sigma)
    valid_dataset = MyDataset(X=X_valid, y=y_valid)
    test_dataset = MyDataset(X=X_test, y=y_test)


    # NOTE(review): shuffle=False on the training loader means batches are
    # identical every epoch; training usually benefits from shuffle=True.
    train_loader = DataLoader(dataset=train_dataset, batch_size=128, shuffle=False)
    valid_loader = DataLoader(dataset=valid_dataset, batch_size=128, shuffle=False)
    test_loader = DataLoader(dataset=test_dataset, batch_size=128, shuffle=False)

    model = NerualNet_yieldRate(fds=fds, bucket_num=bucket_num, bucket_start=bucket_start, start_update=start_update, start_smooth=start_smooth,
                 kernel=fds_kernel, ks=fds_ks, sigma=fds_sigma)
    print('\nmodel: ')
    print(model)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    # loss_fn = nn.CrossEntropyLoss()
    loss_fn = nn.MSELoss()  # defined but unused below; weighted_mse_loss is used instead
    # loss_fn = nn.NLLLoss()

    # Per-epoch mean metrics, accumulated across the whole run for plotting.
    mean_train_losses = {'MSE': [], 'MAE': [], 'R2': []}
    mean_valid_losses = {'MSE': [], 'MAE': [], 'R2': []}

    # mean_train_losses = []
    # mean_valid_losses = []
    epochs = 3000

    # NOTE(review): "tarin" is a typo for "train", kept as-is; holds the
    # last epoch's training predictions for the comparison plot below.
    y_tarin_pred=[]
    for epoch in range(epochs):
        model.train()

        train_losses = {'MSE': [], 'MAE': [], 'R2': []}
        valid_losses = {'MSE': [], 'MAE': [], 'R2': []}
        # train_losses = []
        # valid_losses = []
        for i, (data, labels, weights) in enumerate(train_loader):
            data = data.float()
            labels = labels.float()
            optimizer.zero_grad()

            # output = model(data)
            # With FDS enabled the model also returns the intermediate feature.
            if fds:
                output, _ = model(data, labels, epoch=epoch, training=True)
            else:
                output = model(data, labels, epoch=epoch, training=True)
            if epoch==epochs-1:
                # Collect predictions of the final epoch only, for plotting.
                curOutput=output.detach().numpy().tolist()
                # y_tarin_pred.append(curOutput)
                y_tarin_pred+=curOutput

            # loss=r2_loss(output, labels, weights)
            # loss=self_loss(output, labels, weights)
            # loss = loss_fn(output, labels)
            loss = weighted_mse_loss(output, labels, weights)
            # loss = weighted_huber_loss(output, labels, weights)
            loss.backward()
            optimizer.step()

            # train_losses.append(loss.item())

            # NOTE(review): sklearn metrics expect (y_true, y_pred); here the
            # prediction is passed first.  MAE is symmetric, but r2_score is
            # not -- confirm whether this ordering is intentional.
            MAE = mean_absolute_error(output.detach().numpy(), labels.detach().numpy())
            R2_Score = r2_score(output.detach().numpy(), labels.detach().numpy())
            train_losses['MSE'].append(loss.item())
            train_losses['MAE'].append(MAE)
            train_losses['R2'].append(R2_Score)

        if (epoch+1)%100==0:
            # Prints the loss tensor of the LAST batch only, not the epoch mean.
            print('loss: ',loss)
        if fds and epoch >= start_update:
            # FDS bookkeeping: re-encode all training data (no grad) and
            # refresh the model's running feature statistics for this epoch.
            # print(f"Create Epoch [{epoch}] features of all training data...")
            encodings, labels = [], []
            with torch.no_grad():
                for i, (inputs, targets, _) in enumerate(train_loader):
                    # inputs = inputs.cuda(non_blocking=True)
                    inputs=inputs.float()
                    outputs, feature = model(inputs, targets, epoch)
                    encodings.extend(feature.data.squeeze().cpu().numpy())
                    labels.extend(targets.data.squeeze().cpu().numpy())

            encodings, labels = torch.from_numpy(np.vstack(encodings)), torch.from_numpy(
                np.hstack(labels))
            model.FDS.update_last_epoch_stats(epoch)
            model.FDS.update_running_stats(encodings, labels, epoch)

        # Validation pass: metrics only, no optimization.
        model.eval()
        with torch.no_grad():
            for i, (data, labels, _) in enumerate(valid_loader):
                data = data.float()
                labels = labels.float()

                # outputs = model(data)
                outputs = model(data, training=False)
                # loss = loss_fn(outputs, labels)

                MSE=mean_squared_error(outputs.detach().numpy(), labels.detach().numpy())
                MAE = mean_absolute_error(outputs.detach().numpy(), labels.detach().numpy())
                R2_Score = r2_score(outputs.detach().numpy(), labels.detach().numpy())
                valid_losses['MSE'].append(MSE)
                valid_losses['MAE'].append(MAE)
                valid_losses['R2'].append(R2_Score)

        mean_train_losses['MSE'].append(np.mean(train_losses['MSE']))
        mean_train_losses['MAE'].append(np.mean(train_losses['MAE']))
        mean_train_losses['R2'].append(np.mean(train_losses['R2']))
        mean_valid_losses['MSE'].append(np.mean(valid_losses['MSE']))
        mean_valid_losses['MAE'].append(np.mean(valid_losses['MAE']))
        mean_valid_losses['R2'].append(np.mean(valid_losses['R2']))

        # mean_train_losses.append(np.mean(train_losses))
        # mean_valid_losses.append(np.mean(valid_losses))

        if epoch % 100 == 0:
            print('epoch : {}, train loss : {:.4f}, {:.4f}, {:.4f}; valid loss : {:.4f}, {:.4f}, {:.4f}'.format(epoch + 1, np.mean(
            train_losses['MSE']), np.mean(train_losses['MAE']), np.mean(train_losses['R2']), np.mean(
            valid_losses['MSE']), np.mean(valid_losses['MAE']), np.mean(valid_losses['R2'])))

    # Plot final-epoch training predictions against the ground truth.
    a=np.array(y_tarin_pred)
    plt.plot(y_train, label='y_train')
    plt.plot(a, label='y_tarin_pred')
    plt.title('ours_train', fontsize='large', fontweight = 'bold')
    plt.legend()
    plt.show()

    # Loss curves: MSE / MAE / R2 side by side, train vs valid.
    fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(30, 10))
    ax1.plot(mean_train_losses['MSE'], label='train')
    ax1.plot(mean_valid_losses['MSE'], label='valid')
    lines, labels = ax1.get_legend_handles_labels()
    ax1.legend(lines, labels, loc='best')
    ax2.plot(mean_train_losses['MAE'], label='train')
    ax2.plot(mean_valid_losses['MAE'], label='valid')
    lines, labels = ax2.get_legend_handles_labels()
    ax2.legend(lines, labels, loc='best')
    ax3.plot(mean_train_losses['R2'], label='train')
    ax3.plot(mean_valid_losses['R2'], label='valid')
    lines, labels = ax3.get_legend_handles_labels()
    ax3.legend(lines, labels, loc='best')

    plt.show()

    # Persists the whole model object (pickle), then reloads it immediately
    # to evaluate exactly what was written to disk.
    torch.save(model, modelSavePath)

    model=torch.load(modelSavePath)
    test_losses = {'MSE': [], 'MAE': [], 'R2': []}
    with torch.no_grad():
        for i, (data, labels, _) in enumerate(test_loader):
            data = data.float()
            labels = labels.float()

            # data, _ = Coder(data)

            # outputs = model(data)
            outputs = model(data, training=False)
            # loss = loss_fn(outputs, labels)

            MSE = mean_squared_error(outputs.detach().numpy(), labels.detach().numpy())
            MAE = mean_absolute_error(outputs.detach().numpy(), labels.detach().numpy())
            R2_Score = r2_score(outputs.detach().numpy(), labels.detach().numpy())
            test_losses['MSE'].append(MSE)
            test_losses['MAE'].append(MAE)
            test_losses['R2'].append(R2_Score)

    # NOTE(review): appending TEST metrics onto the train/valid history lists
    # looks unintended -- the lists are not read again after this point, but
    # it corrupts them if anyone reuses them later.
    mean_train_losses['MSE'].append(np.mean(test_losses['MSE']))
    mean_train_losses['MAE'].append(np.mean(test_losses['MAE']))
    mean_train_losses['R2'].append(np.mean(test_losses['R2']))
    mean_valid_losses['MSE'].append(np.mean(test_losses['MSE']))
    mean_valid_losses['MAE'].append(np.mean(test_losses['MAE']))
    mean_valid_losses['R2'].append(np.mean(test_losses['R2']))

    print('\ntest: ')
    # MSE = mean_squared_error(y_test, y_pred)
    # MAE = mean_absolute_error(y_test, y_pred)
    # R2_Score = r2_score(y_test, y_pred)
    print("MSE: ", np.mean(test_losses['MSE']))
    print("MAE: ", np.mean(test_losses['MAE']))
    print("R2_Score: ", np.mean(test_losses['R2']))


if __name__ == '__main__':
    # Entry point: train on the raw ingot-rate dataset and save the model.
    train('../../data/ingotRate.csv', './ingotRatePrediction.pkl')
    # train('../../data/ingotRate_average.csv','./ingotRatePrediction_average.pkl')







# First classify with k-means, then run KNN regression within each class
# def train(dataPath, modelSavePath):
#     print('\ntraining: ', dataPath)
#
#     dataset = pd.read_csv(dataPath)
#     # X = dataset.iloc[:, :-1]
#     # y_category = dataset['category']
#     # y_L=dataset['L']
#
#     dataset=dataset.values
#
#     # normalize
#     # X_scaled = preprocessing.StandardScaler().fit_transform(X)
#     # X_scaled = preprocessing.MinMaxScaler().fit_transform(X)
#     # X_scaled = preprocessing.MaxAbsScaler().fit_transform(X)
#     # X_scaled = preprocessing.Normalizer().fit_transform(X)
#
#     # poly = PolynomialFeatures(degree=2)
#     # X_scaled=poly.fit_transform(X_scaled)
#
#     # cross features
#     # X_scaled=X_scaled.tolist()
#     # for i in range(len(X_scaled)):
#     #     cmpnt_weight = X_scaled[i][0:1]
#     #     cmpnt_content = X_scaled[i][1:4]
#     #     cmpnt_surface = X_scaled[i][4:8]
#     #     cmpnt_element = X_scaled[i][8:14]
#     #     cmpnt_cs=[v[0]*v[1] for v in itertools.product(cmpnt_content,cmpnt_surface)]
#     #     cmpnt_ce=[v[0]*v[1] for v in itertools.product(cmpnt_content,cmpnt_element)]
#     #     cmpnt_se=[v[0]*v[1] for v in itertools.product(cmpnt_surface,cmpnt_element)]
#     #     X_scaled[i]=cmpnt_weight+cmpnt_content+cmpnt_surface+cmpnt_element+cmpnt_cs+cmpnt_ce+cmpnt_se
#     #     # X_scaled[i]=cmpnt_content+cmpnt_surface+cmpnt_element+cmpnt_cs+cmpnt_ce+cmpnt_se
#     # X_scaled=np.array(X_scaled,dtype=float)
#
#
#
#     dataset_category=[[] for _ in range(0,n_category)]
#     for i in range(0, dataset.shape[0]):
#         dataset_category[int(dataset[i][-1])].append(dataset[i])
#
#     # transform to numpy array
#     # for i in range(0, len(dataset_category)):
#     #     dataset_category[i]=np.array(dataset_category[i])
#
#     dataset_splited = {"train":None,"valid":None,"test":None}
#     for i in range(0, n_category):
#         dataset_train, dataset_test = train_test_split(dataset_category[i], test_size=0.2, random_state=0)
#         dataset_train, dataset_valid = train_test_split(dataset_train, test_size=0.25, random_state=0)
#         if i==0:
#             dataset_splited["train"]=dataset_train
#             dataset_splited["valid"]=dataset_valid
#             dataset_splited["test"]=dataset_test
#         else:
#             dataset_splited["train"] += dataset_train
#             dataset_splited["valid"] += dataset_valid
#             dataset_splited["test"] += dataset_test
#
#     # transform to numpy array
#     dataset_splited["train"]=np.array(dataset_splited["train"])
#     dataset_splited["valid"]=np.array(dataset_splited["valid"])
#     dataset_splited["test"]=np.array(dataset_splited["test"])
#
#     np.random.seed(0)
#     np.random.shuffle(dataset_splited["train"])
#     np.random.seed(0)
#     np.random.shuffle(dataset_splited["valid"])
#     np.random.seed(0)
#     np.random.shuffle(dataset_splited["test"])
#
#     X_train=dataset_splited["train"][:, :-2]
#     y_L_train=dataset_splited["train"][:, -2]
#     y_category_train=dataset_splited["train"][:, -1]
#     X_valid=dataset_splited["valid"][:, :-2]
#     y_L_valid=dataset_splited["valid"][:, -2]
#     y_category_valid=dataset_splited["valid"][:, -1]
#     X_test=dataset_splited["test"][:, :-2]
#     y_L_test=dataset_splited["test"][:, -2]
#     y_category_test=dataset_splited["test"][:, -1]
#
#
#
#
#     # Splitting the dataset into the Training set and Test set
#     # X_train, X_test, y_category_train, y_category_test, y_L_train, y_L_test = train_test_split(X.values, y_category.values, y_L.values, test_size=0.2, random_state=0)
#
#     # train the autoencoder
#     # train_dataset = MyDataset(X=X_train, y=y_train)
#     # autoEncoder.trainAutoEncoder(train_dataset)
#     # Coder = torch.load('./AutoEncoder.pkl')
#
#     # X_train, X_valid, y_category_train, y_category_valid, y_L_train, y_L_valid = train_test_split(X_train, y_category_train, y_L_train, test_size=0.25, random_state=0)
#
#     print('\ntrain image shape : ', X_train.shape)
#     print('train label shape : ', y_category_train.shape)
#     print('valid image shape : ', X_valid.shape)
#     print('valid label image : ', y_category_valid.shape)
#     print('test image shape  : ', X_test.shape)
#     print('test label image : ', y_category_test.shape)
#
#     # train_dataset = MyDataset(X=X_train, y=y_category_train)
#     # valid_dataset = MyDataset(X=X_valid, y=y_category_valid)
#     # test_dataset = MyDataset(X=X_test, y=y_category_test)
#
#     train_dataset = MyDataset(X=X_train, y=y_L_train)
#     valid_dataset = MyDataset(X=X_valid, y=y_L_valid)
#     test_dataset = MyDataset(X=X_test, y=y_L_test)
#
#
#     train_loader = DataLoader(dataset=train_dataset, batch_size=128, shuffle=False)
#     valid_loader = DataLoader(dataset=valid_dataset, batch_size=128, shuffle=False)
#     test_loader = DataLoader(dataset=test_dataset, batch_size=128, shuffle=False)
#
#     model = NerualNet_yieldRate()
#     print('\nmodel: ')
#     print(model)
#
#     optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
#     # loss_fn = nn.CrossEntropyLoss()
#     loss_fn = nn.MSELoss()
#     # loss_fn = nn.NLLLoss()
#
#     mean_train_losses = {'MSE': [], 'MAE': [], 'R2': []}
#     mean_valid_losses = {'MSE': [], 'MAE': [], 'R2': []}
#
#     # mean_train_losses = []
#     # mean_valid_losses = []
#     valid_acc_list = []
#     epochs = 1000
#
#     y_tarin_pred=[]
#     validLabels=[]
#     for epoch in range(epochs):
#         model.train()
#
#         train_losses = {'MSE': [], 'MAE': [], 'R2': []}
#         valid_losses = {'MSE': [], 'MAE': [], 'R2': []}
#         # train_losses = []
#         # valid_losses = []
#         for i, (data, labels) in enumerate(train_loader):
#             data = data.float()
#             labels = labels.float()
#
#             # data, _ = Coder(data)
#
#             optimizer.zero_grad()
#
#             output = model(data)
#             if epoch==epochs-1:
#                 curOutput=output.detach().numpy().tolist()
#                 y_tarin_pred.append(curOutput)
#
#             loss = loss_fn(output, labels)
#             # loss = loss_fn(output, labels.long())
#             loss.backward()
#             optimizer.step()
#
#             # train_losses.append(loss.item())
#
#             MAE = mean_absolute_error(output.detach().numpy(), labels.detach().numpy())
#             R2_Score = r2_score(output.detach().numpy(), labels.detach().numpy())
#             train_losses['MSE'].append(loss.item())
#             train_losses['MAE'].append(MAE)
#             train_losses['R2'].append(R2_Score)
#
#         model.eval()
#         correct = 0
#         total = 0
#         with torch.no_grad():
#             for i, (data, labels) in enumerate(valid_loader):
#                 data = data.float()
#                 labels = labels.float()
#
#                 # data, _ = Coder(data)
#
#                 outputs = model(data)
#                 loss = loss_fn(outputs, labels)
#                 # loss = loss_fn(output, labels.long())
#
#                 # valid_losses.append(loss.item())
#
#                 # _, predicted = torch.max(outputs.data, 1)
#                 # correct += (predicted == labels).sum().item()
#                 # total += labels.size(0)
#                 # validLabels=predicted.detach().numpy()
#
#                 MAE = mean_absolute_error(outputs.detach().numpy(), labels.detach().numpy())
#                 R2_Score = r2_score(outputs.detach().numpy(), labels.detach().numpy())
#                 valid_losses['MSE'].append(loss.item())
#                 valid_losses['MAE'].append(MAE)
#                 valid_losses['R2'].append(R2_Score)
#
#         mean_train_losses['MSE'].append(np.mean(train_losses['MSE']))
#         mean_train_losses['MAE'].append(np.mean(train_losses['MAE']))
#         mean_train_losses['R2'].append(np.mean(train_losses['R2']))
#         mean_valid_losses['MSE'].append(np.mean(valid_losses['MSE']))
#         mean_valid_losses['MAE'].append(np.mean(valid_losses['MAE']))
#         mean_valid_losses['R2'].append(np.mean(valid_losses['R2']))
#
#         # mean_train_losses.append(np.mean(train_losses))
#         # mean_valid_losses.append(np.mean(valid_losses))
#
#         if epoch % 100 == 0:
#             # accuracy = 100 * correct / total
#             # valid_acc_list.append(accuracy)
#             # print('epoch : {}, train loss : {:.4f}, valid loss : {:.4f}, valid acc : {:.2f}%' \
#             #       .format(epoch + 1, np.mean(train_losses), np.mean(valid_losses), accuracy))
#             print('epoch : {}, train loss : {:.4f}, {:.4f}, {:.4f}; valid loss : {:.4f}, {:.4f}, {:.4f}'.format(epoch + 1, np.mean(
#             train_losses['MSE']), np.mean(train_losses['MAE']), np.mean(train_losses['R2']), np.mean(
#             valid_losses['MSE']), np.mean(valid_losses['MAE']), np.mean(valid_losses['R2'])))
#
#     # a=np.array(y_tarin_pred)
#     # plt.plot(y_train, label='y_train')
#     # plt.plot(a.mean(axis=0), label='y_tarin_pred')
#     # plt.title('ours_train', fontsize='large', fontweight = 'bold')
#     # plt.legend()
#     # plt.show()
#
#
#     # fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(15, 10))
#     # ax1.plot(mean_train_losses, label='train')
#     # ax1.plot(mean_valid_losses, label='valid')
#     # lines1, labels1 = ax1.get_legend_handles_labels()
#     # ax1.legend(lines1, labels1, loc='best')
#     #
#     # ax2.plot(valid_acc_list, label='valid acc')
#     # lines2, labels2 = ax1.get_legend_handles_labels()
#     # ax2.legend(lines2, labels2, loc='best')
#
#
#     fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(30, 10))
#     ax1.plot(mean_train_losses['MSE'][50:], label='train')
#     ax1.plot(mean_valid_losses['MSE'][50:], label='valid')
#     lines, labels = ax1.get_legend_handles_labels()
#     ax1.legend(lines, labels, loc='best')
#     ax2.plot(mean_train_losses['MAE'][50:], label='train')
#     ax2.plot(mean_valid_losses['MAE'][50:], label='valid')
#     lines, labels = ax2.get_legend_handles_labels()
#     ax2.legend(lines, labels, loc='best')
#     ax3.plot(mean_train_losses['R2'][50:], label='train')
#     ax3.plot(mean_valid_losses['R2'][50:], label='valid')
#     lines, labels = ax3.get_legend_handles_labels()
#     ax3.legend(lines, labels, loc='best')
#
#     plt.show()
#
#     torch.save(model, modelSavePath)
#
#     # trainLabels=y_category_train.tolist()
#     # validLabels=validLabels.tolist()
#     #
#     # KNNTrainData=[[] for _ in range(0,n_category)]
#     # KNNTrainLabels=[[] for _ in range(0,n_category)]
#     # KNNValidData=[[] for _ in range(0,n_category)]
#     # KNNValidLabels=[[] for _ in range(0,n_category)]
#     # for i in range(0,len(trainLabels)):
#     #     KNNTrainData[int(trainLabels[i])].append(X_train.tolist()[i])
#     #     KNNTrainLabels[int(trainLabels[i])].append(y_L_train.tolist()[i])
#     # for i in range(0,len(validLabels)):
#     #     KNNValidData[int(validLabels[i])].append(X_valid.tolist()[i])
#     #     KNNValidLabels[int(validLabels[i])].append(y_L_valid.tolist()[i])
#
#
#     # clf=LinearSVC(random_state=0)
#     # clf.fit(X_train,y_category_train)
#     # scores = cross_val_score(clf, np.append(X_train,X_valid,axis=0), np.append(y_category_train,y_category_valid,axis=0), cv=10, scoring='accuracy')
#     # print('LinearSVC acc: ',scores.mean())
#     # # predictions=clf.predict(X_valid)
#     # # scores=clf.score(X_valid, y_category_valid, sample_weight=None)
#     #
#     trainLabels=y_category_train.tolist()
#     validLabels=y_category_valid.tolist()
#
#     KNNTrainData=[[] for _ in range(0,n_category)]
#     KNNTrainLabels=[[] for _ in range(0,n_category)]
#     KNNValidData=[[] for _ in range(0,n_category)]
#     KNNValidLabels=[[] for _ in range(0,n_category)]
#     for i in range(0,len(trainLabels)):
#         KNNTrainData[int(trainLabels[i])].append(X_train.tolist()[i])
#         KNNTrainLabels[int(trainLabels[i])].append(y_L_train.tolist()[i])
#     for i in range(0,len(validLabels)):
#         KNNValidData[int(validLabels[i])].append(X_valid.tolist()[i])
#         KNNValidLabels[int(validLabels[i])].append(y_L_valid.tolist()[i])
#
#     for i in range(n_category):
#         knn.train(np.array(KNNTrainData[i]), np.array(KNNTrainLabels[i]), np.array(KNNValidData[i]), np.array(KNNValidLabels[i]), i)