import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
from torch.utils.data import Dataset, TensorDataset, DataLoader
from sklearn.metrics import f1_score


# Hyperparameter settings shared by the model and training code.
num_epochs = 10  # number of training epochs
learning_rate = 0.002  # optimizer learning rate (optimizer itself not defined in this chunk)
in_channels = 93  # Conv1d input channels — matches the 93-dim feature vectors
out_channels = 93  # Conv1d output channels
kernel_size = 1  # Conv1d kernel size (1 keeps the sequence length unchanged)
num_classes = 6  # number of target classes


# Custom Dataset wrapping raw feature rows and integer class targets.
class MyDataset(Dataset):
    """Dataset over in-memory feature rows and integer class targets.

    Parameters
    ----------
    data : sequence of feature rows convertible by ``torch.FloatTensor``
    targets : sequence of int class labels
    """

    def __init__(self, data, targets):
        self.data = data
        # Convert targets once up front; each label is then an int64 scalar tensor.
        self.labels = torch.LongTensor(targets)

    def __getitem__(self, index):
        # Features are converted lazily, per item.
        x = torch.FloatTensor(self.data[index])
        # Fix: self.labels is already a LongTensor, so the original
        # tensor -> numpy -> tensor round trip was a redundant copy.
        y = self.labels[index]
        return x, y

    def __len__(self):
        return len(self.data)


# Choose between CPU and GPU.
def get_default_device():
    """Return the preferred torch device: 'cuda' when available, else 'cpu'."""
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')


# Move data to the given device; handles a single tensor or a (possibly nested)
# list/tuple of tensors, recursing into containers.
def to_device(data, device):
    """Recursively transfer ``data`` (a tensor, or list/tuple of tensors) to ``device``.

    Containers always come back as a list, matching the original contract.
    """
    if not isinstance(data, (list, tuple)):
        return data.to(device, non_blocking=True)
    return [to_device(item, device) for item in data]


# Wrapper around a DataLoader: yields each batch after moving it to the target device.
class DeviceDataLoader:
    """Wrap a dataloader so every batch it yields lands on a chosen device."""

    def __init__(self, dl, device):
        self.dl = dl          # the wrapped dataloader
        self.device = device  # target device for every batch

    def __iter__(self):
        """Yield batches from the wrapped loader, transferred to ``self.device``."""
        yield from (to_device(batch, self.device) for batch in self.dl)

    def __len__(self):
        """Number of batches, delegated to the wrapped loader."""
        return len(self.dl)


# Defines the 1-D CNN architecture plus the per-batch training/validation steps;
# epoch_end prints the aggregated training and validation metrics for each epoch.
class OneDCNN(nn.Module):
    """1-D CNN classifier: Conv1d -> ReLU -> flatten -> fc1 -> ReLU -> dropout -> fc2.

    Also bundles the per-batch training/validation helpers and the per-epoch
    metric aggregation used by the surrounding training loop.
    """

    def __init__(self, num_classes, in_channels, out_channels, kernel_size=1, stride=1, dropout_rate=0.1):
        super().__init__()
        # Keep the configuration around for later inspection.
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.dropout_rate = dropout_rate

        self.conv1d = nn.Conv1d(in_channels, out_channels, kernel_size)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(dropout_rate)
        # NOTE(review): fc1's input size of 93 assumes the conv output flattens
        # to exactly 93 features (93 channels x sequence length 1) — confirm
        # against the actual input shape fed by the caller.
        self.fc1 = nn.Linear(93, 96)
        self.fc2 = nn.Linear(96, num_classes)

    def forward(self, x):
        features = self.relu(self.conv1d(x))
        flat = features.view(features.size(0), -1)
        hidden = self.dropout(self.relu(self.fc1(flat)))
        return self.fc2(hidden)

    def training_step(self, batch):
        """Run one training batch; return (macro-F1, cross-entropy loss)."""
        data, labels = batch
        logits = self(data)
        loss = F.cross_entropy(logits, labels)
        return f1_score1(logits, labels), loss

    def validation_step(self, batch):
        """Run one validation batch; return its loss and macro-F1 as a dict."""
        data, labels = batch
        logits = self(data)
        return {
            'val_loss': F.cross_entropy(logits, labels),
            'val_f1': f1_score1(logits, labels),
        }

    def validation_epoch_end(self, outputs):
        """Average the per-batch validation metrics into epoch-level scalars."""
        epoch_loss = torch.stack([out['val_loss'] for out in outputs]).mean()
        epoch_f1 = torch.stack([out['val_f1'] for out in outputs]).mean()
        return {'val_loss': epoch_loss.item(), 'val_f1': epoch_f1.item()}

    def epoch_end(self, epoch, result):
        # Keep the exact report format the training loop's logs expect.
        print("Epoch [{}]: train_f1: {:.4f}, train_loss: {:.4f}, val_loss: {:.4f}, val_f1: {:.4f}".format(
            epoch, result['train_f1'], result['train_loss'], result['val_loss'], result['val_f1']))


def f1_score1(outputs, labels):
    """Macro-averaged F1 score from raw logits.

    Parameters
    ----------
    outputs : torch.Tensor of shape (batch, num_classes) — raw class scores
    labels : torch.Tensor of shape (batch,) — integer ground-truth labels

    Returns
    -------
    torch.Tensor — 0-dim float tensor holding the macro F1.
    """
    _, preds = torch.max(outputs, dim=1)
    # Fix: sklearn requires CPU numpy arrays. Passing CUDA tensors directly
    # (as the original did) raises a TypeError during np.asarray conversion.
    f1 = f1_score(labels.cpu().numpy(), preds.cpu().numpy(), average='macro')
    return torch.tensor(f1)
