import datetime
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
import torch
from tqdm import tqdm

from loadDataset import LoadModelParam
from modelSource import Multi_featureV2 as multi_person_feature

def test():
    """Train ``multi_person_feature`` on the PSD feature set and log metrics.

    Despite its name this routine runs the full training pipeline: load
    the feature/label ``.npy`` files, build a 70/10/20 train/test/valid
    split with a fixed-seed shuffle, train for 100 epochs (validating and
    checkpointing each epoch), then evaluate the final model on the
    held-out test split.

    Fixes relative to the previous revision:
    * ``optim.zero_grad()`` is now called before ``backward()`` —
      gradients previously accumulated across all batches and epochs.
    * the final evaluation uses ``test_loader`` (it previously re-used
      the validation loader, so the test split was never evaluated).
    * accuracy is correct predictions / number of samples (it was
      divided by the number of batches, yielding values up to ~100).
    * the shuffle permutation was accidentally applied twice; once.
    * the log file is managed by ``with`` and losses are detached via
      ``.item()`` instead of accumulating live tensors.
    """
    # Columns 29-31 of the pickled feature matrix; each row is assumed to
    # flatten into a (1, 3, 55) block — TODO confirm against the producer
    # of psd_feature.npy.
    data: np.ndarray = np.load(r"/资料/9深度学习/train_files/psd_feature.npy", allow_pickle=True)[:, [29, 30, 31]]
    label: np.ndarray = np.load(r"/资料/9深度学习/train_files/psd_feature-label.npy", allow_pickle=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    batch_size = 100

    # Project-local loader restores parameters into the model instance.
    model = LoadModelParam.LoadModelParam("", multi_person_feature.multi_person_feature(1, 2)).GetModel().to(device)

    loss_fn = torch.nn.CrossEntropyLoss()
    learning_rate = 0.01
    optim = torch.optim.SGD(model.parameters(), learning_rate, momentum=0.9)
    epoch = 100

    features = torch.from_numpy(data.reshape(-1, 1, 3, 55))
    # Duplicate the 3 feature rows along the "height" axis:
    # (N, 1, 3, 55) -> (N, 1, 6, 55).
    features = torch.cat((features, features), dim=2)

    # Deterministic shuffle so the split is reproducible across runs.
    # FIX: the permutation used to be applied twice by accident.
    rng = np.random.RandomState(9999)
    ind_shuf = list(range(features.shape[0]))
    rng.shuffle(ind_shuf)
    features = features[ind_shuf]
    label = label[ind_shuf]

    def _make_loader(samples: torch.Tensor, labels: np.ndarray) -> DataLoader:
        # Wrap a (samples, labels) slice in a shuffling DataLoader.
        dataset = TensorDataset(samples.clone().to(torch.float32),
                                torch.from_numpy(labels).type(torch.LongTensor))
        return DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=2)

    # Split: first 20 % validation, next 10 % test, last 70 % training.
    n = len(label)
    train_loader = _make_loader(features[int(n * 0.3):], label[int(n * 0.3):])
    test_loader = _make_loader(features[int(n * 0.2):int(n * 0.3)], label[int(n * 0.2):int(n * 0.3)])
    valid_loader = _make_loader(features[:int(n * 0.2)], label[:int(n * 0.2)])

    def _evaluate(loader: DataLoader):
        # Run the model over ``loader``; return (total_loss, correct, samples).
        model.eval()
        total_loss = 0.0
        correct = 0
        samples = 0
        with torch.no_grad():
            for batch, target in loader:
                # NOTE(review): the fixed reshape to (1, 1, 6, 240) looks
                # inconsistent with batch_size=100 samples of per-sample
                # shape (1, 6, 55) — confirm against the model's expected
                # input before trusting this pipeline end to end.
                batch = batch.float().reshape(1, 1, 6, 240).to(device)
                target = target.long().to(device)
                output = model(batch)
                total_loss += loss_fn(output, target).item()
                correct += (output.argmax(1) == target).sum().item()
                samples += target.size(0)
        return total_loss, correct, samples

    # Per-epoch history, kept for later inspection/plotting.
    loss_array = [0.0] * epoch
    accuracy_array = [0.0] * epoch

    with open("./log/" + str(datetime.datetime.today()) + "record.txt", "w") as record_file:
        for i in tqdm(range(epoch), desc="Training"):
            model.train()
            for batch, target in train_loader:
                batch = batch.float().reshape(1, 1, 6, 240).to(device)  # see NOTE in _evaluate
                target = target.long().to(device)
                output = model(batch)
                loss = loss_fn(output, target)  # CrossEntropyLoss expects Long targets
                optim.zero_grad()  # FIX: clear gradients left over from the previous batch
                loss.backward()
                optim.step()

            valid_loss, correct, samples = _evaluate(valid_loader)
            accuracy = correct / samples  # FIX: fraction of samples, not a per-batch average
            loss_array[i] = valid_loss
            accuracy_array[i] = accuracy
            record_file.write("Epoch：{}，整体测试集上的Loss:{}\n".format(i, valid_loss))
            record_file.write("Epoch：{}，整体测试集上的Accuracy:{}\n".format(i, accuracy))
            record_file.write("-------------------------------------\n")
            # Checkpoint every epoch, tagging the file with validation accuracy.
            torch.save(model, "./model/test_{}_{:5f}.pth".format(i, accuracy))

    # Final report on the held-out test split.
    # FIX: this used to re-evaluate valid_loader, leaving test_loader unused.
    _, correct, samples = _evaluate(test_loader)
    print(correct / samples)