import os
import torch
import numpy as np
from torch.utils.data import DataLoader,Dataset
from torchvision import transforms
from sklearn.model_selection import train_test_split
os.chdir('Task4-SVM')
from model.SVM import *

# Select the training device: CUDA GPU when available, otherwise CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Report the device actually selected (the original always printed 'cuda').
print("Using {} device".format(device))

# Load raw pixel data and scale from [0, 255] into [0, 1].
data = torch.from_numpy(np.load('dataset/standard/data.npy')).to(device)/255.0
# Labels as int64, as required by torch loss/indexing ops.
label = torch.from_numpy(np.load('dataset/standard/label.npy')).to(device).to(torch.int64)
#label[label!=4] = 1 
#label[label==4] = 0 
group = torch.from_numpy(np.load('dataset/standard/group.npy')).to(device)

# Random 70/30 train/test split.
data_train,data_test,label_train,label_test = train_test_split(data,label,train_size=0.7)

# Custom dataset preprocessing pipeline (normalization left disabled;
# note the transform is currently not applied in SIM_Dataset.__getitem__).
preprocess = transforms.Compose([
    transforms.ToTensor(),
    # transforms.Normalize(mean=0, std=1)
])

class SIM_Dataset(Dataset):
    """Thin Dataset wrapper over pre-loaded input/target sequences.

    ``transform`` is stored but deliberately not applied in
    ``__getitem__`` — the inputs are already tensors on the device.
    """

    def __init__(self, inputs, targets, transform):
        self.inputs = inputs
        self.targets = targets
        self.transform = transform

    def __len__(self):
        # One label per sample.
        return len(self.targets)

    def __getitem__(self, idx):
        # Return the (sample, label) pair; no transform is applied.
        return self.inputs[idx], self.targets[idx]

# Build dataset objects for the two splits.
training_data = SIM_Dataset(data_train, label_train, transform=preprocess)
test_data = SIM_Dataset(data_test, label_test, transform=preprocess)

# Wrap each split in a shuffled mini-batch loader.
batch_size = 32
train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=True)

# One-vs-all SVM ensemble over 5 classes, moved to the training device.
model = OVA_SVMs(
    width=403,
    height=755,
    intolerance_rate=1,
    class_num=5
).to(device)

# Number of training epochs.
epochs = 1001

# Loss function (unused by train_loop, which relies on the model's own loss).
loss_fn = torch.nn.CrossEntropyLoss()
# Optimizer with an LR schedule that halves the rate at each 50-epoch milestone.
learning_rate = 5e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
ExpLR = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50 * i for i in range(20)], gamma=0.5)

# Training loop for one epoch.
def train_loop(dataloader, model, optimizer):
    """Run one epoch of training over ``dataloader``.

    The model computes its own loss in ``forward(X, y)``; this loop just
    takes one optimizer step per mini-batch.

    Args:
        dataloader: yields (X, y) mini-batches already on the right device.
        model: module whose ``forward(X, y)`` returns a scalar loss tensor.
        optimizer: torch optimizer bound to ``model.parameters()``.
    """
    for X, y in dataloader:
        # The SVM model returns its loss directly from forward().
        loss = model(X, y)

        # Standard backpropagation step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

# 定义测试循环
def test_loop(dataloader, model, set_name, p):
    size = len(dataloader.dataset)
    test_loss, correct = 0, 0

    with torch.no_grad():
        for X, y in dataloader:
            #X=X.to(device)
            #y=y.to(device)
            test_loss += model(X,y)
            pred = model.classify(X).to(device)
            #test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    test_loss /= size
    correct /= size
    if p:
        print(f"{set_name} Accuracy: {(100*correct):>0.3f}%, Avg loss: {test_loss:>8f}")
    return test_loss

# Main training loop: train each epoch, evaluate on both splits, and
# print a full report every 100 epochs.
p = False  # explicit init: the original relied on t starting at 0 to define p
for t in range(epochs):
    if t % 100 == 0:
        print(f"-----------------\nEpoch {t}")
        p = True
    train_loop(train_dataloader, model, optimizer)
    test_loop(train_dataloader, model, 'Train', p)
    test_loop(test_dataloader, model, ' Test', p)
    p = False
    ExpLR.step()  # advance the LR schedule once per epoch
print("Done!")