import torch
from sklearn.metrics import classification_report
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.data import Dataset, DataLoader
import scipy.io as sio


class MyDataset(Dataset):
    """Dataset backed by a .mat file holding {train,val,test}_{features,labels}.

    Args:
        data_path: path to the .mat file to load with scipy.io.loadmat.
        data_type: which split to expose — 'train', 'val' or 'test'.

    Raises:
        ValueError: if data_type is not a recognized split name.
    """

    def __init__(self, data_path, data_type):
        super(MyDataset, self).__init__()
        if data_type not in ('train', 'val', 'test'):
            # Fail fast: the original silently left features/labels unset and
            # crashed later with an opaque AttributeError.
            raise ValueError(
                "data_type must be 'train', 'val' or 'test', got {!r}".format(data_type))
        self.data_path = data_path
        self.data = sio.loadmat(data_path)
        # loadmat usually yields float64 features and small-int labels; cast
        # once so nn.Linear (float32 weights) and CrossEntropyLoss (int64
        # targets) accept them directly. No-op when dtypes already match.
        self.features = torch.tensor(self.data[data_type + '_features']).float()
        self.labels = torch.tensor(self.data[data_type + '_labels'].reshape(-1)).long()

    def __len__(self):
        # Number of samples in the selected split.
        return len(self.labels)

    def __getitem__(self, index):
        # Returns (feature_vector, label) for one sample.
        return self.features[index], self.labels[index]


class Classification(nn.Module):
    """Two-layer MLP classifier producing raw class logits.

    Architecture: Linear(in_features -> hidden_features) -> ReLU ->
    Linear(hidden_features -> num_classes).

    Args:
        in_features: input feature dimension (default 4096, the original
            hard-coded value, so existing callers are unaffected).
        hidden_features: hidden layer width (default 512).
        num_classes: number of output classes / logits (default 4).
    """

    def __init__(self, in_features=4096, hidden_features=512, num_classes=4):
        super(Classification, self).__init__()
        self.classification = nn.Sequential(
            nn.Linear(in_features, hidden_features),
            nn.ReLU(),
            nn.Linear(hidden_features, num_classes),
        )

    def forward(self, x):
        """Return logits of shape (batch, num_classes); no softmax applied."""
        return self.classification(x)


# ---- training / validation script -----------------------------------------
# Trains the MLP on the 'train' split for 10 epochs, reporting the running
# mean loss, then evaluates on the 'val' split each epoch with a full
# per-class classification report.
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
print("Using {} device training.".format(device.type))
model = Classification().to(device)
train_dataset = MyDataset(data_path=r'D:\Code\2-ZSL\0-data\data\dataset\D0.mat', data_type='train')
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
val_dataset = MyDataset(r'D:\Code\2-ZSL\0-data\data\dataset\D0.mat', 'val')
val_loader = DataLoader(val_dataset, batch_size=128, shuffle=False)

criterion = CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(10):
    # ---- train one epoch ----
    model.train()
    # Plain Python float instead of a GPU tensor: .item() syncs once per
    # batch anyway, and we avoid the needless device allocation.
    train_loss = 0.0
    for step, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        # Incremental mean of the per-batch losses over the epoch.
        train_loss = (train_loss * step + loss.item()) / (step + 1)
    # Label the message: train and val prints were previously identical,
    # making the log ambiguous.
    print('Epoch {}, Train Loss: {}'.format(epoch, train_loss))

    # ---- validate ----
    model.eval()
    y_true, y_pred = [], []
    val_loss = 0.0
    with torch.no_grad():
        for step, (data, target) in enumerate(val_loader):
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = criterion(output, target)
            val_loss = (val_loss * step + loss.item()) / (step + 1)
            # Predicted class = argmax over the logit dimension.
            predicted = output.argmax(dim=1)

            y_pred.extend(predicted.cpu().numpy())
            y_true.extend(target.cpu().numpy())

    print('Epoch {}, Val Loss: {}'.format(epoch, val_loss))
    metric = classification_report(y_true=y_true, y_pred=y_pred, digits=4)
    print(metric)


