import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from torch import nn
import torchvision
from torch.nn import *
from pytorch_grad_cam import GradCAM
import global_var
from torch.utils.data import DataLoader


class MyDataset(Dataset):
    """Dataset pairing each sample's scattering matrix with its label."""

    def __init__(self, data, num):
        # data[0] holds the images and data[1] the labels; keep the
        # first `num` (image, label) pairs.
        self.imgs = [(data[0][k], data[1][k]) for k in range(num)]

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, index):
        sample, label = self.imgs[index]
        # Magnitude of the first channel, plus a leading channel axis:
        # (91, 91) -> (1, 91, 91).
        tensor = torch.tensor(abs(sample[0]), dtype=torch.float32).unsqueeze(0)
        return tensor, label


class Mydataset_s2(Dataset):
    """Dataset for the second network.

    In training mode (test=False) `data` is a pair of indexable
    containers (data[0] = images, data[1] = targets) from which the
    first `num` pairs are taken.  In test mode `data` itself is a single
    (image, target) pair and `num` is ignored.

    Fix: removed a leftover debug print() that wrote the sample shapes
    to stdout on every test-mode __getitem__ call.
    """

    def __init__(self, data, num, test=False, transform=None):
        if test:
            # A single (image, target) sample.
            imgs = [data]
        else:
            imgs = [(data[0][i], data[1][i]) for i in range(num)]
        self.imgs = imgs
        self.transform = transform
        self.test = test

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, index):
        pic, label = self.imgs[index]
        if self.test:
            pic = torch.tensor(abs(pic), dtype=torch.float32)
            label = torch.tensor(abs(label), dtype=torch.float32)
        else:
            # Training samples carry a leading singleton axis; drop it
            # via [0] and take magnitudes.
            pic = torch.tensor(abs(pic[0]), dtype=torch.float32)
            label = torch.tensor(abs(label[0]), dtype=torch.float32)

        if self.transform is None:
            pic = pic.unsqueeze(0)      # add a channel axis: (91, 91) -> (1, 91, 91)
        else:
            label = label.unsqueeze(0)
            pic = self.transform(pic)
            label = self.transform(label)
        return pic, label

class my_module(nn.Module):
    """Two conv stages followed by a two-layer head producing one scalar."""

    def __init__(self):
        super(my_module, self).__init__()
        # Feature extractor: two (conv -> ReLU -> 2x2 max-pool) stages.
        # For a (1, 91, 91) input: 91 -> 87 -> 43 -> 39 -> 19.
        self.layer = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),
        )
        # Regression head: flatten (64 * 19 * 19 = 23104) -> 512 -> 1.
        self.fc = nn.Sequential(
            nn.Flatten(),
            nn.Linear(23104, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 1),
        )

    def forward(self, x):
        features = self.layer(x)
        return self.fc(features)


# Freeze all layers except the ones being fine-tuned (layer4 / avgpool / fc).
def set_parameter_require_grad(model, feature_extract):
    """Toggle which parameters are trained.

    When `feature_extract` is True, freeze every parameter that does not
    belong to the layer4/avgpool/fc submodules (typical ResNet
    fine-tuning); otherwise unfreeze everything.

    Bug fix: named_parameters() yields dotted names such as
    "layer4.0.conv1.weight", so the previous exact-equality comparison
    (name != "layer4") never matched and froze *all* parameters.
    """
    trainable_prefixes = ("layer4", "avgpool", "fc")
    for name, param in model.named_parameters():
        if feature_extract:
            # Train only parameters inside the whitelisted submodules.
            param.requires_grad = name.startswith(trainable_prefixes)
        else:
            param.requires_grad = True


def find_max_loss(j, losses, loss, max_loss_value, max_loss_index):
    """Record the batch loss and track the running maximum.

    Appends loss.item() to `losses`; when the largest element of this
    batch's loss exceeds `max_loss_value`, the running maximum and its
    global index (batch index j plus the in-batch offset) are updated.
    Returns (max_loss_index, max_loss_value).
    """
    losses.append(loss.item())
    batch_max = loss.max().item()
    if batch_max > max_loss_value:
        max_loss_value = batch_max
        max_loss_index = int(j * 1 + loss.argmax().item())
    return max_loss_index, max_loss_value


def find_min_loss(j, losses, loss, min_loss_value, min_loss_index):
    """Record the batch loss and track the running minimum.

    Appends loss.item() to `losses`; when the smallest element of this
    batch's loss is below `min_loss_value`, the running minimum and its
    global index (batch index j plus the in-batch offset) are updated.
    Returns (min_loss_index, min_loss_value).
    """
    losses.append(loss.item())
    batch_min = loss.min().item()
    if batch_min < min_loss_value:
        min_loss_value = batch_min
        min_loss_index = int(j * 1 + loss.argmin().item())
    return min_loss_index, min_loss_value


# Inject a fixed noise pattern into every scattering matrix.
def add_noise(noise, smatrix):
    """Return a copy of `smatrix` with scaled noise added.

    The noise sample at a fixed index is rounded to 3 decimals, scaled
    by the noise strength, and added to the magnitude of every matrix
    stored in the (N, 1) object array `smatrix`.
    """
    noise_alpha = 0.5           # noise strength
    noise_id = 125              # index of the noise sample to use
    scaled_noise = noise_alpha * np.around(noise[noise_id], 3)
    noisy = np.empty_like(smatrix, dtype=np.ndarray)   # shape = (720, 1)
    for row in range(smatrix.shape[0]):
        noisy[row, 0] = scaled_noise + abs(smatrix[row, 0])
    return noisy


# Build the input data for the second network.
def save_2_matrix(image_train, target_train, output, target, batch_size):
    """For every sample in the batch, find the training entries whose
    targets are closest to the prediction and to the ground truth, and
    append the magnitudes of the matching scattering matrices to the
    global p_matrix / r_matrix buffers.
    """
    for b in range(batch_size):
        # Training-set entry whose target is closest to the prediction.
        pred_diff = np.abs(target_train - output[b].detach().numpy())
        pred_idx = np.unravel_index(pred_diff.argmin(), target_train.shape)
        # Training-set entry whose target is closest to the ground truth.
        true_diff = np.abs(target_train - target[b].detach().numpy())
        true_idx = np.unravel_index(true_diff.argmin(), target_train.shape)
        global_var.p_matrix.append(abs(image_train[pred_idx]))
        global_var.r_matrix.append(abs(image_train[true_idx]))


def create_2_dataset(net, real_matrix, predic_matrix, train=True):
    """Build the DataLoader that feeds the second network.

    For each real scattering matrix a Grad-CAM activation map is
    computed with `net`, and a 3-channel image (real matrix, activation
    map, predicted matrix) is assembled.  With train=True the global
    s2 training buffers are filled and a 720-sample training loader
    (batch_size=10) is returned; otherwise a single-sample test loader
    is built from the given matrices.

    Fix: the GradCAM object was previously constructed inside the
    per-sample training loop; it is loop-invariant, so it is now built
    once (each construction re-registers hooks on the model).
    """
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        # transforms.Normalize((0.5,), (0.5,))
    ])
    # Target the last block of layer4 for Grad-CAM; build the CAM once.
    cam = GradCAM(net, [net.layer4[-1]])

    if train:   # build the training dataset
        for i in range(len(real_matrix)):
            mx_tensor = torch.tensor(real_matrix[i], dtype=torch.float)
            img_tensor = mx_tensor.unsqueeze(0).unsqueeze(0)
            activation_map = cam(img_tensor)

            # 3-channel input: (real, CAM, predicted), shape = (3, 91, 91).
            img1 = np.stack([real_matrix[i], activation_map.squeeze(0), predic_matrix[i]])
            global_var.s2_train_img[i, 0] = img1
            global_var.s2_train_target[i, 0] = real_matrix[i]           # shape = (91, 91)

        train_2_dataset = Mydataset_s2((global_var.s2_train_img, global_var.s2_train_target), 720, transform=transform)
        train_2_dataloader = DataLoader(train_2_dataset, batch_size=10)
        return train_2_dataloader

    else:           # build the single-sample test dataset
        mx_tensor = torch.tensor(real_matrix, dtype=torch.float)
        img_tensor = mx_tensor.unsqueeze(0).unsqueeze(0)
        activation_map = cam(img_tensor)

        img1 = np.stack([real_matrix, activation_map.squeeze(0), predic_matrix])
        test_2_dataset = Mydataset_s2((img1, real_matrix), 1, test=True, transform=transform)
        test_2_dataloader = DataLoader(test_2_dataset, batch_size=1)
        return test_2_dataloader

# m1 = my_module()
# print(m1)
# input = torch.ones((1, 1, 91, 91))     # sanity-check the network's forward pass
# output = m1(input)
# print(output.shape)