import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.io import loadmat
from torch import optim
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay


def set_seed(seed):
    """Seed every RNG used in this script (python, numpy, torch CPU/CUDA)
    and force cuDNN into deterministic mode for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade cuDNN autotuner speed for run-to-run determinism.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True


class ResidualBlock(nn.Module):
    """1x1-conv residual block: expand channels 2x, contract back to the
    input width, add the skip connection, then apply ReLU."""

    def __init__(self, in_channels):
        super().__init__()
        expanded = in_channels * 2
        self.conv1 = nn.Conv1d(in_channels, expanded, kernel_size=1)
        self.bn1 = nn.BatchNorm1d(expanded)
        self.conv2 = nn.Conv1d(expanded, in_channels, kernel_size=1)
        self.bn2 = nn.BatchNorm1d(in_channels)

    def forward(self, x):
        """x: (B, C, L) -> (B, C, L), C == in_channels."""
        shortcut = x
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        return F.relu(h + shortcut)


class PrototypicalNetwork(nn.Module):
    """Embedding network: 1x1-conv stem -> residual stack -> transformer
    encoder -> MLP head producing an `output_size`-dim embedding.

    NOTE(review): the flatten before `fc` assumes every input sample has
    exactly `seq_len` features — confirm against the dataset's width.
    """

    def __init__(self, input_size, hidden_size, output_size, nhead=8, num_layers=2, seq_len=8):
        super().__init__()
        self.conv1 = nn.Conv1d(1, input_size, kernel_size=1)
        self.bn1 = nn.BatchNorm1d(input_size)
        # Three identical residual blocks at the stem width.
        self.res_blocks = nn.Sequential(*[ResidualBlock(input_size) for _ in range(3)])
        self.transformer = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model=input_size, nhead=nhead, batch_first=True),
            num_layers=num_layers,
        )
        self.fc = nn.Sequential(
            nn.Linear(seq_len * input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size // 2),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(hidden_size // 2, output_size),
        )

    def forward(self, x):
        """x: (B, F) raw features -> (B, output_size) embedding."""
        h = x.unsqueeze(1)                         # (B, 1, F)
        h = F.relu(self.bn1(self.conv1(h)))        # (B, C, F)
        h = self.res_blocks(h)
        h = self.transformer(h.permute(0, 2, 1))   # (B, F, C) tokens
        return self.fc(h.reshape(h.size(0), -1))


class FewShotDataset:
    """Wraps a (samples, features) array plus integer labels and serves
    random per-class batches for episodic sampling."""

    def __init__(self, data, labels):
        self.data = data.astype(np.float32)
        self.labels = labels.astype(int)
        self.class_to_indices = self.build_class_index()

    def build_class_index(self):
        """Map each label value to the list of row indices carrying it."""
        index = {}
        for row, lbl in enumerate(self.labels):
            index.setdefault(lbl, []).append(row)
        return index

    def get_items(self, classes, num_samples):
        """Draw `num_samples` rows (without replacement) for each class in
        `classes`; returns (data_tensor, label_tensor) in class order."""
        chunks, labels = [], []
        for cls in classes:
            chosen = random.sample(self.class_to_indices[cls], num_samples)
            chunks.append(self.data[chosen])
            labels += [cls] * num_samples
        return torch.tensor(np.concatenate(chunks)), torch.tensor(labels)


def episodic_loader(dataset, n_way, k_shot, q_query):
    """Sample one N-way episode: pick `n_way` classes, draw a support set
    (`k_shot` per class) and a query set (`q_query` per class), and remap
    the original class labels onto episode-local labels 0..n_way-1."""
    classes = random.sample(list(dataset.class_to_indices.keys()), n_way)
    support_x, raw_support_y = dataset.get_items(classes, k_shot)
    query_x, raw_query_y = dataset.get_items(classes, q_query)
    remap = {cls: pos for pos, cls in enumerate(classes)}
    support_y = torch.tensor([remap[y.item()] for y in raw_support_y])
    query_y = torch.tensor([remap[y.item()] for y in raw_query_y])
    return support_x, support_y, query_x, query_y


class SupportSelfAttention(nn.Module):
    """Refines a class's support embeddings with one self-attention pass,
    wrapped in a residual add + dropout + LayerNorm."""

    def __init__(self, dim, num_heads=4, dropout=0.1):
        super().__init__()
        self.mha = nn.MultiheadAttention(embed_dim=dim, num_heads=num_heads, batch_first=True)
        self.norm = nn.LayerNorm(dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, E):
        """E: (K, D) support embeddings -> (K, D) refined embeddings."""
        batched = E.unsqueeze(0)  # MHA wants a batch axis: (1, K, D)
        attended, _ = self.mha(batched, batched, batched)
        return self.norm(E + self.dropout(attended.squeeze(0)))


def compute_prototypes(embeddings, labels, num_classes, attn_module=None):
    """Build one prototype per class from the support embeddings.

    Each class's support embeddings are optionally refined by a
    self-attention module, then combined as a weighted mean where each
    embedding's weight is its mean cosine similarity to the class's other
    embeddings — so outlier support samples contribute less.

    Args:
        embeddings: (N_support, D) support-set embeddings.
        labels: (N_support,) episode-local labels in [0, num_classes).
        num_classes: N-way of the episode.
        attn_module: optional SupportSelfAttention applied per class.

    Returns:
        (num_classes, D) tensor of class prototypes.
    """
    prototypes = []
    for cls in range(num_classes):
        E = embeddings[labels == cls]  # (K, D)
        if attn_module is not None:
            E = attn_module(E)  # self-attention refinement, still (K, D)
        # Pairwise cosine similarity; each row's mean scores how typical
        # that support sample is for its class.
        sim = F.cosine_similarity(E.unsqueeze(1), E.unsqueeze(0), dim=2)
        w = sim.mean(dim=1)  # (K,)
        # Epsilon keeps the division safe if the weights sum to ~0.
        prototypes.append((E * w.unsqueeze(1)).sum(dim=0) / (w.sum() + 1e-8))
    return torch.stack(prototypes)  # (num_classes, D)


def prototypical_loss(prototypes, embeddings, labels):
    """Standard prototypical-network loss: cross-entropy over the softmax
    of negative Euclidean distances from each query to every prototype."""
    neg_dist = -torch.cdist(embeddings, prototypes)
    return F.nll_loss(F.log_softmax(neg_dist, dim=1), labels)


def train(model, dataset, device, optimizer, attn_module, n_way=5, k_shot=5, q_query=15, episodes=100):
    """Episodic training loop.

    Each episode samples an `n_way`-way task from `dataset`, embeds the
    support and query sets with `model`, builds attention-weighted class
    prototypes, and backpropagates the prototypical loss.

    Args:
        model: embedding network (trained in place).
        dataset: FewShotDataset providing episodic sampling.
        device: torch device for all tensors.
        optimizer: optimizer over the trainable parameters.
        attn_module: SupportSelfAttention used inside prototype building.
        n_way/k_shot/q_query: episode shape.
        episodes: number of training episodes.
    """
    model.train()
    # Fix: the attention module is part of the training graph, so it must
    # also be in train mode (its Dropout would otherwise stay in whatever
    # mode a previous evaluate() left it).
    attn_module.train()
    for episode in range(episodes):
        support_x, support_y, query_x, query_y = episodic_loader(dataset, n_way, k_shot, q_query)
        support_x, support_y = support_x.to(device), support_y.to(device)
        query_x, query_y = query_x.to(device), query_y.to(device)

        emb_support = model(support_x)
        emb_query = model(query_x)
        prototypes = compute_prototypes(emb_support, support_y, n_way, attn_module=attn_module)
        loss = prototypical_loss(prototypes, emb_query, query_y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        print(f"[Episode {episode + 1}] Loss: {loss.item():.4f}")


def evaluate(model, dataset, device, attn_module, n_way=5, k_shot=5, q_query=15, episodes=200):
    """Evaluate over `episodes` sampled tasks; classify each query by its
    nearest (Euclidean) prototype.

    Prints the pooled accuracy, shows a confusion matrix over all query
    predictions, and returns the accuracy percentage.
    """
    model.eval()
    # Fix: attn_module was left in train mode here, so its Dropout stayed
    # active during evaluation and made prototypes nondeterministic.
    attn_module.eval()
    correct, total = 0, 0
    all_preds, all_labels = [], []
    with torch.no_grad():
        for _ in range(episodes):
            support_x, support_y, query_x, query_y = episodic_loader(dataset, n_way, k_shot, q_query)
            support_x, support_y = support_x.to(device), support_y.to(device)
            query_x, query_y = query_x.to(device), query_y.to(device)
            emb_support = model(support_x)
            emb_query = model(query_x)
            prototypes = compute_prototypes(emb_support, support_y, n_way, attn_module=attn_module)
            # Nearest prototype decides the predicted class.
            dists = torch.cdist(emb_query, prototypes)
            preds = torch.argmin(dists, dim=1)
            correct += (preds == query_y).sum().item()
            total += query_y.size(0)
            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(query_y.cpu().numpy())
    acc = 100 * correct / total
    print(f"Evaluation Accuracy: {acc:.2f}%")
    cm = confusion_matrix(all_labels, all_preds)
    disp = ConfusionMatrixDisplay(confusion_matrix=cm)
    disp.plot(cmap=plt.cm.Blues)
    plt.title("Confusion Matrix")
    plt.show()
    return acc


def main():
    """Entry point: load the .mat dataset, build the embedding network and
    support-attention module, train episodically, then evaluate."""
    set_seed(42)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Last column of 'traindata' is the class label; the rest are features.
    mat = loadmat("traindata_10dB.mat")
    data = mat['traindata'][:, :-1]
    labels = mat['traindata'][:, -1]
    dataset = FewShotDataset(data, labels)

    # Embedding dim (output_size=64) must match the attention module's `dim`.
    model = PrototypicalNetwork(input_size=8, hidden_size=256, output_size=64).to(device)
    attn_module = SupportSelfAttention(dim=64, num_heads=4).to(device)
    # Fix: the attention module's weights take part in the loss, so they
    # must be optimized too — previously only model.parameters() was passed
    # and attn_module never learned.
    optimizer = optim.Adam(
        list(model.parameters()) + list(attn_module.parameters()), lr=1e-3
    )

    train(model, dataset, device, optimizer, attn_module=attn_module, n_way=3, k_shot=1, q_query=15, episodes=200)
    evaluate(model, dataset, device, attn_module=attn_module, n_way=3, k_shot=1, q_query=15, episodes=200)


if __name__ == '__main__':
    main()
