import numpy as np
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F


class Code:
    """One radar working-mode parameter record.

    Every field is a plain Python list; `concatenate_to_numpy` joins the
    fields in declaration order into a single flat 1-D feature vector whose
    last element is the class label.
    """

    def __init__(self, C, D, PRF, turnback, duty, modulate, staynum, pulsewidth, label):
        self.C = C                    # C-parameter triple
        self.D = D                    # D-parameter triple
        self.PRF = PRF                # pulse-repetition-frequency code
        self.turnback = turnback
        self.duty = duty              # duty-cycle range
        self.modulate = modulate      # modulation flag
        self.staynum = staynum        # dwell-count range
        self.pulsewidth = pulsewidth  # pulse-width range
        self.label = label            # class label (kept last in the vector)

    def concatenate_to_numpy(self):
        """Flatten all parameter lists (label last) into one NumPy array."""
        fields = (self.C, self.D, self.PRF, self.turnback, self.duty,
                  self.modulate, self.staynum, self.pulsewidth, self.label)
        # Field order defines the feature-vector layout, so keep it fixed.
        return np.array([value for field in fields for value in field])


# Ten radar working-mode templates. Each Code packs, in order:
# C(3), D(3), PRF, turnback, duty(3), modulate, staynum(3), pulsewidth(3), label.
VS = Code([0.1305, 0.3728, 0.2423], [0.8942, 1.0389, 1.1836], [0], [0],
          [10, 20, 30], [0], [1500, 3750, 7500], [1, 2, 3], [0])
HRWS = Code([0.1311, 0.187, 0.2429], [0.8955, 1.041, 1.1856], [0], [0],
            [10, 15, 25], [1], [250, 1250, 2000], [1, 2, 3], [1])
MRWS = Code([0.1311, 0.187, 0.2429], [0.8955, 1.041, 1.1856], [0], [0],
            [10, 15, 25], [1], [250, 1250, 2000], [2, 3, 4], [2])
TWS = Code([0.1314, 0.1873, 0.2433], [0.8979, 1.0434, 1.1889], [2], [0],
           [10, 20, 30], [0], [16, 64, 128], [0.81, 2.01, 4], [3])
TAS = Code([0.0451, 0.1332, 0.2213], [0.8552, 1.0719, 1.2886], [2], [1],
           [10, 20, 30], [0], [16, 64, 128], [0.28, 2.28, 4], [4])
STT = Code([0, 0.0025, 0.005], [0, 0.0025, 0.005], [3], [0],
           [0.1, 2.6, 5], [0], [20000, 20000, 20000], [1, 2, 3], [5])
SAM = Code([0.0432, 0.1312, 0.2192], [0.8543, 1.0708, 1.2873], [2], [1],
           [10, 20, 30], [0], [16, 64, 128], [1, 2, 3], [6])
GMTI = Code([0.1305, 0.3728, 0.2423], [0.8942, 1.0389, 1.1836], [0], [0],
            [0.1, 15.1, 25], [0], [20, 320, 550], [2, 32, 60], [7])
GMTT = Code([0.0451, 0.1332, 0.2213], [0.8552, 1.0719, 1.2886], [0], [1],
            [0.1, 15.1, 25], [0], [20, 470, 1000], [2, 32, 60], [8])
SAR = Code([0.1314, 0.1873, 0.2433], [0.8979, 1.0434, 1.1889], [1], [1],
           [1, 16, 25], [1], [150, 50100, 100000], [3, 32, 33], [9])

# Flatten each mode once; keep the per-mode array names for external use.
_mode_arrays = [m.concatenate_to_numpy()
                for m in (VS, HRWS, MRWS, TWS, TAS, STT, SAM, GMTI, GMTT, SAR)]
(VS_array, HRWS_array, MRWS_array, TWS_array, TAS_array,
 STT_array, SAM_array, GMTI_array, GMTT_array, SAR_array) = _mode_arrays

# One row per mode; the final column is the class label.
combined_2d_array = np.vstack(_mode_arrays)




class CustomDataset(Dataset):
    """Dataset over a 2-D array whose last column holds the integer label."""

    def __init__(self, data):
        # `data` is indexable as data[i, :] — a 2-D NumPy array here.
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        row = self.data[index]
        # All columns but the last are features; the last one is the label.
        features = torch.tensor(row[:-1], dtype=torch.float32)
        target = torch.tensor(row[-1], dtype=torch.long)
        return features, target


# Wrap the stacked mode table in a Dataset. The batch size equals the
# dataset length, so every iteration yields the whole table as one batch
# (shuffling only permutes row order inside that single batch).
dataSet = CustomDataset(combined_2d_array)
batch_size = len(dataSet)  # full-batch loading
dataLoader = DataLoader(
    dataSet,
    batch_size=batch_size,
    shuffle=True,
)



class MLP(nn.Module):
    """Four-layer fully-connected embedder.

    Layout: input -> hidden -> hidden -> hidden//2 -> output, with ReLU
    after each hidden layer and a linear (no activation) output layer.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(MLP, self).__init__()
        # Attribute names fc1..fc4 are part of the state_dict layout.
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, hidden_size // 2)
        self.fc4 = nn.Linear(hidden_size // 2, output_size)

    def forward(self, x):
        # ReLU after each hidden layer; the output layer stays linear.
        for hidden in (self.fc1, self.fc2, self.fc3):
            x = F.relu(hidden(x))
        return self.fc4(x)


def compute_prototypes(embeddings, labels, num_classes):
    """Return per-class mean embeddings ("prototypes").

    Args:
        embeddings: (N, dim) tensor of sample embeddings.
        labels: (N,) integer tensor of class ids in [0, num_classes).
        num_classes: number of prototype rows to produce.

    Returns:
        (num_classes, dim) tensor on the same device/dtype as `embeddings`.
        A class with no samples keeps an all-zero prototype (the original
        code produced NaN from the mean of an empty tensor).
    """
    # Allocate on the embeddings' device/dtype; the original CPU-default
    # zeros raised a device-mismatch error when embeddings lived on CUDA.
    prototypes = torch.zeros(num_classes, embeddings.size(1),
                             dtype=embeddings.dtype, device=embeddings.device)
    for i in range(num_classes):
        mask = labels == i
        if mask.any():  # skip empty classes to avoid NaN means
            prototypes[i] = embeddings[mask].mean(0)
    return prototypes


# Embedder hyperparameters: 18 input features per mode (label excluded),
# 256-wide hidden layers, 64-dimensional output embedding.
input_size = 18
hidden_size = 256
output_size = 64
model = MLP(input_size, hidden_size, output_size)
# Fall back to CPU when CUDA is unavailable; the original hard-coded
# torch.device('cuda') crashed on CPU-only machines. The duplicate
# model.to(device) call was also removed (it was a no-op).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)


class Know:
    """Holds the class prototypes ("knowledge") computed from the loader."""

    def __init__(self):
        # Filled by culKnow(); None until computed.
        self.know = None

    def culKnow(self):
        """Embed the full-batch loader and cache the 10-class prototypes.

        Fixes over the original: the result is stored in `self.know`
        (the attribute was declared but never written), and an empty
        loader raises a clear RuntimeError instead of a NameError on the
        unbound local `know`.
        """
        know = None
        # batch_size == len(dataSet), so this loop runs exactly once per epoch.
        for features, labels in dataLoader:
            embedding = model(features.to(device))
            know = compute_prototypes(embedding, labels.to(device), 10)
        if know is None:
            raise RuntimeError("dataLoader produced no batches")
        self.know = know
        return know
