import random

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.io import loadmat
from sklearn.model_selection import train_test_split
from torch import optim
from torch.utils.data import DataLoader
from data import CustomDataset
from data import AddGaussianNoise
import torchvision.transforms as transforms
from pdw import Know
from visdom import Visdom
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
import matplotlib.pyplot as plt
from tqdm import tqdm

def set_seed(seed):
    """Seed every RNG in use (torch CPU/CUDA, numpy, random) and force
    deterministic cuDNN kernels so training runs are reproducible."""
    for seeder in (torch.manual_seed,
                   torch.cuda.manual_seed,
                   torch.cuda.manual_seed_all,
                   np.random.seed,
                   random.seed):
        seeder(seed)
    # Trade cuDNN autotuning speed for bitwise-repeatable kernels.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


class ResidualBlock(nn.Module):
    """1x1-conv residual block: expand channels x2, project back to the
    input width, add the skip connection, and apply a final ReLU."""

    def __init__(self, in_channels):
        super(ResidualBlock, self).__init__()
        expanded = in_channels * 2
        # NOTE: module creation order matches the seeded-init expectations
        # of callers (conv1, bn1, conv2, bn2).
        self.conv1 = nn.Conv1d(in_channels, expanded, kernel_size=1, stride=1, padding=0)
        self.bn1 = nn.BatchNorm1d(expanded)
        self.conv2 = nn.Conv1d(expanded, in_channels, kernel_size=1, stride=1, padding=0)
        self.bn2 = nn.BatchNorm1d(in_channels)

    def forward(self, x):
        # Main path: conv -> BN -> ReLU -> conv -> BN, then skip + ReLU.
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + x)


# Prototypical-network embedding model
class PrototypicalNetwork(nn.Module):
    """Embedding backbone for prototypical-network classification.

    ``forward`` expects ``x`` of shape ``(batch, L)``: it is lifted to
    ``(batch, 1, L)``, expanded to ``input_size`` channels by a 1x1 conv,
    refined by three residual blocks, flattened, and mapped through an MLP
    head to an ``output_size``-dimensional embedding.

    Args:
        input_size: number of conv channels. The flatten before ``fc1``
            assumes the per-sample feature length L also equals
            ``input_size`` (fc1 takes ``input_size * input_size`` inputs).
        hidden_size: width of the fully-connected head.
        output_size: dimensionality of the output embedding space.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(PrototypicalNetwork, self).__init__()
        self.conv1 = nn.Conv1d(1, input_size, kernel_size=1, stride=1)
        self.bn1 = nn.BatchNorm1d(input_size)
        # Residual feature extractor; add more blocks here if needed.
        self.residual_blocks = nn.Sequential(
            ResidualBlock(input_size),
            ResidualBlock(input_size),
            ResidualBlock(input_size)
        )
        # MLP head mapping flattened conv features to the embedding space.
        self.fc1 = nn.Linear(input_size * input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, hidden_size // 4)
        self.fc4 = nn.Linear(hidden_size // 4, hidden_size // 2)
        self.fc5 = nn.Linear(hidden_size // 2, hidden_size // 2)
        self.fc6 = nn.Linear(hidden_size // 2, output_size)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        x = x.unsqueeze(1)              # (batch, 1, L): single conv channel
        x = F.relu(self.conv1(x))
        x = F.relu(self.bn1(x))
        x = self.residual_blocks(x)
        # Fixed: removed leftover debug print(x.shape) that fired on every
        # forward pass.
        x = x.view(x.size(0), -1)       # flatten conv features
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.dropout(x)             # regularize mid-head only
        x = F.relu(self.fc4(x))
        x = F.relu(self.fc5(x))
        return self.fc6(x)              # raw embedding, no final activation


# NOTE: the dataset class itself lives in data.py (CustomDataset)
# Compute class prototypes (per-class mean embeddings)
def compute_prototypes(embeddings, labels, num_classes):
    """Return the per-class mean embedding (prototype) matrix.

    Args:
        embeddings: ``(N, D)`` tensor of support-set embeddings.
        labels: ``(N,)`` integer tensor of class ids in ``[0, num_classes)``.
        num_classes: number of classes / rows of the result.

    Returns:
        ``(num_classes, D)`` tensor on the same device/dtype as
        ``embeddings``.

    Note: a class with no support samples yields a NaN row (mean over an
    empty selection).
    """
    # Fixed: allocate on the embeddings' device/dtype so GPU inputs work;
    # the original CPU-only buffer raised on CUDA row assignment.
    prototypes = torch.zeros(num_classes, embeddings.size(1),
                             device=embeddings.device, dtype=embeddings.dtype)
    for i in range(num_classes):
        prototypes[i] = embeddings[labels == i].mean(0)
    return prototypes


# Prototypical-network loss (earlier unstabilized version kept for reference):
# def prototypical_loss(prototypes, embeddings, labels):
#     distances = torch.cdist(embeddings, prototypes)
#     log_p_y = F.log_softmax(-distances, dim=1)
#     loss = F.nll_loss(log_p_y, labels)
#     return loss


def prototypical_loss(prototypes, embeddings, labels):
    """Prototypical-network loss: NLL of the softmax over negative
    query-to-prototype Euclidean distances.

    Args:
        prototypes: ``(C, D)`` class prototype matrix.
        embeddings: ``(Q, D)`` query embeddings.
        labels: ``(Q,)`` ground-truth class ids.

    Returns:
        Scalar loss tensor (mean over queries).
    """
    # Pairwise distances between every query and every prototype.
    dist = torch.cdist(embeddings, prototypes)
    # Shift each row by its max before the (negated) log-softmax; softmax
    # is shift-invariant, so this only aids numerical stability.
    row_max, _ = dist.max(dim=1, keepdim=True)
    log_probs = F.log_softmax(-(dist - row_max), dim=1)
    return F.nll_loss(log_probs, labels)


def process_batch(proto_net, batch, device, num_classes, num_support):
    """Split one episode batch into support/query sets, embed both, and
    return ``(prototypes, query_embeddings, query_labels)``.

    The first ``num_classes * num_support`` samples of the batch form the
    support set; everything after is the query set.
    """
    data, labels = batch[0], batch[1]
    split = num_classes * num_support

    support_embeddings = proto_net(data[:split].to(device))
    support_labels = labels[:split].to(device)
    prototypes = compute_prototypes(support_embeddings, support_labels,
                                    num_classes).to(device)

    query_embeddings = proto_net(data[split:].to(device)).to(device)
    query_labels = labels[split:].to(device)
    return prototypes, query_embeddings, query_labels


# Main entry point
def main():
    """Train a prototypical network on the PDW dataset, log per-epoch loss
    and test accuracy, save the model, and plot a final confusion matrix."""
    seed = 42
    set_seed(seed)
    # Visdom live-plotting (optional; requires a running visdom server)
    # viz = Visdom()
    # Fixed: fall back to CPU instead of crashing on hosts without CUDA.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Episode layout.
    num_classes = 10   # classes per episode
    num_support = 15   # support samples per class
    num_query = 25     # query samples per class
    dictdata = loadmat("traindata_5dB.mat")
    dataload = dictdata['traindata']

    # Network hyper-parameters.
    input_size = 8     # conv channels / per-sample feature length
    hidden_size = 256
    output_size = 64   # embedding dimensionality

    proto_net = PrototypicalNetwork(input_size, hidden_size, output_size)
    proto_net.to(device)
    optimizer = optim.Adam(proto_net.parameters(), lr=0.001)

    # Build episode loaders; each batch is one full episode.
    dataset = CustomDataset(dataload, noisy=False)
    train_split, test_split = train_test_split(dataset, test_size=0.9, random_state=42)
    episode_size = num_classes * (num_support + num_query)
    train_loader = DataLoader(train_split, batch_size=episode_size, shuffle=True)
    test_loader = DataLoader(test_split, batch_size=episode_size, shuffle=True)
    # Fixed: removed dead debug draw `x, y = next(iter(train_loader))`
    # that silently consumed RNG state.

    # Prior-knowledge prototypes, injected as an auxiliary loss after
    # a warm-up period (presumably fixed "known" class prototypes —
    # see pdw.Know).
    know_instance = Know()
    know = know_instance.culKnow().to(device)

    log_file = "training_log1.txt"
    with open(log_file, 'w') as f:
        for epoch in range(500):
            proto_net.train()
            epoch_loss = 0.0
            batch_count = 0
            for batch in tqdm(train_loader, desc=f'Epoch {epoch + 1}'):
                # Split into support/query and embed.
                prototypes, query_embeddings, query_labels = process_batch(
                    proto_net, batch, device, num_classes, num_support)

                loss = prototypical_loss(prototypes, query_embeddings, query_labels)
                total_loss = loss
                if epoch > 10:
                    # Prior-knowledge injection after warm-up.
                    total_loss = total_loss + prototypical_loss(
                        know, query_embeddings, query_labels)

                optimizer.zero_grad()
                # Fixed: a single backward over the summed losses replaces
                # the original pair of backward(retain_graph=True) calls,
                # which kept the whole graph alive needlessly every step.
                total_loss.backward()
                optimizer.step()

                epoch_loss += loss.item()
                batch_count += 1
            avg_loss = epoch_loss / batch_count
            print(f'Epoch {epoch + 1}, Average Loss: {avg_loss:.4f}')

            # Per-epoch evaluation: nearest-prototype classification.
            proto_net.eval()
            with torch.no_grad():
                correct = 0
                total = 0
                for batch1 in test_loader:
                    prototypes, query_embeddings, query_labels = process_batch(
                        proto_net, batch1, device, num_classes, num_support)
                    distances = torch.cdist(query_embeddings, prototypes)
                    # Nearest prototype wins.
                    _, predicted = torch.min(distances, dim=1)
                    total += query_labels.size(0)
                    correct += (predicted == query_labels).sum().item()
                accuracy = 100 * correct / total
                print(f'Epoch {epoch}, Test Accuracy: {accuracy}%')
            # Fixed: log the scalar value, not the tensor repr.
            f.write(f'Epoch {epoch}, Loss: {loss.item()}, Accuracy: {accuracy}\n')

    torch.save(proto_net.state_dict(), 'my_model.pth')
    print("Training log saved to:", log_file)

    # Final evaluation + confusion matrix. Prototypes are rebuilt from each
    # test batch's own labels (transductive evaluation, as in the original).
    proto_net.eval()
    with torch.no_grad():
        final_test_correct = 0
        final_test_total = 0
        all_labels = []
        all_preds = []
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            embeddings = proto_net(inputs)
            prototypes = compute_prototypes(embeddings, labels, num_classes).to(device)
            distances = torch.cdist(embeddings, prototypes)
            _, predicted = torch.min(distances, dim=1)
            final_test_total += labels.size(0)
            final_test_correct += (predicted == labels).sum().item()
            all_labels.extend(labels.cpu().numpy())
            all_preds.extend(predicted.cpu().numpy())
        final_test_accuracy = 100 * final_test_correct / final_test_total
        print(f'Final Test Accuracy: {final_test_accuracy}%')
        cm = confusion_matrix(all_labels, all_preds)
        disp = ConfusionMatrixDisplay(confusion_matrix=cm)
        disp.plot(cmap=plt.cm.Blues)
        plt.title('Confusion Matrix')
        plt.show()


if __name__ == '__main__':
    main()