import numpy as np
import scipy.io as scio
import torch
from torch.utils.data import DataLoader
from data_class import *
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import time
import torch
from torchvision.models import *
import matplotlib.pyplot as plt
from pytorch_grad_cam import GradCAM
import global_var
import cv2
import cam_pic
import second_model
from unet import unet_model
# Training device: prefer CUDA when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the .mat databases and build the datasets.
crack_db = scio.loadmat("../crack_db.mat")['crack_db']
crack_st = scio.loadmat("../crack_test.mat")['crack_test']
noise_db = scio.loadmat("../coh_noise_database.mat")['coh_noise_database']
image_train = crack_db[0,0]['S']      # scattering matrices of the training database
target_train = crack_db[0,0]['size']  # crack lengths of the training database
image_test = crack_st[0,0]['S']
target_test = crack_st[0,0]['size']
noise = noise_db[0,0]['S']             # coherent-noise samples

# Corrupt the clean scattering matrices with noise (add_noise comes from
# data_class; the exact mixing behavior is defined there).
image_train_withNoise = add_noise(noise[0],image_train)
image_test_withNoise = add_noise(noise[0],image_test)

train_data = (image_train_withNoise, target_train)    # (inputs, labels) tuple
test_data = (image_test_withNoise, target_test)
# print(train_data[1][1])

# NOTE(review): 720 / 684 look like hard-coded dataset sizes — confirm they
# still match the .mat contents if the databases ever change.
train_dataset = MyDataset(train_data,720)       # training set
test_dataset = MyDataset(test_data, 684)        # test set
train_dataloader = DataLoader(train_dataset,batch_size=10)
test_dataloader = DataLoader(test_dataset,batch_size=1)



# Neural-network model: ResNet-18 backbone adapted to single-channel input
# and a scalar (crack-length) regression head.
# BUG FIX: the original called resnet18(resnet.BasicBlock, pretrained=True).
# resnet18() constructs its own BasicBlock internally; the stray positional
# argument binds to `pretrained` and the keyword then raises
# "TypeError: got multiple values for argument 'pretrained'".
net = torchvision.models.resnet18(pretrained=True)
# Replace the stem so the network accepts 1-channel scattering matrices
# (stride reduced from the stock 2 to 1, keeping more spatial resolution;
# the new conv gets fresh random weights, not the pretrained 3-channel ones).
net.conv1 = nn.Conv2d(in_channels=1,out_channels=64,kernel_size=7,stride=1,padding=3)
set_parameter_require_grad(net,True)    # project helper: freeze all layers except the fc head
# my_net = my_module()
# Custom regression head replacing the 1000-class classifier.
num_features = net.fc.in_features
net.fc = nn.Sequential(
    nn.Linear(num_features, 512),  # hidden width 512
    nn.ReLU(inplace=True),
    nn.Linear(512, 1)  # single scalar output: predicted crack length
)
# BUG FIX: move the model to the device only AFTER every submodule has been
# attached. The original called .to(device) before replacing net.fc, leaving
# the new head on the CPU and crashing the forward pass under CUDA.
net = net.to(device)

# Loss function: mean-squared error for the scalar crack-length regression.
loss_fn = nn.MSELoss()
loss_fn = loss_fn.to(device)
# Optimizer: plain SGD over all parameters (frozen ones have
# requires_grad=False and simply receive no gradient updates).
learn_rate = 0.005
optimizer = torch.optim.SGD(net.parameters(),lr=learn_rate)
# Training bookkeeping.
# Number of completed training epochs (x-axis for the TensorBoard curve).
total_train_step = 0
# Number of completed test passes.
total_test_step = 0
# Number of epochs to train.
epoch = 8
batch_size = 1  # test-time batch size (matches the test DataLoader above)
# TensorBoard writer for monitoring the training-loss curve.
writer = SummaryWriter("./logs_train")
# Per-sample test losses plus trackers for the worst (max-loss) test sample.
losses = []
max_loss_index = 0
max_loss_value = float('-inf')
# max_loss_value = float('inf')
best_loss = float('inf')
# Training loop: one pass per epoch; the checkpoint with the smallest mean
# epoch loss is saved to best_model_size.pth for later evaluation.
for i in range(epoch):
    print("第{}轮训练开始".format(i+1))
    # Accumulate per-batch losses for this epoch's mean.
    running_loss = 0.0
    for j, data in enumerate(train_dataloader):
        imgs, targets = data        # imgs: scattering matrices; targets: crack lengths
        imgs = imgs.to(device)
        targets = targets.to(device)

        outputs = net(imgs)
        # On the final epoch, dump predictions next to inputs/labels
        # (save_2_matrix is a project helper from data_class — presumably
        # it also fills global_var.r_matrix/p_matrix used later; confirm).
        if i == epoch-1:
            save_2_matrix(image_train,target_train,outputs,targets,10)

        # Cast targets to float32 so MSELoss dtypes match the model output.
        targets = targets.to(torch.float32)
        loss = loss_fn(outputs,targets)  # training loss for this batch
        # Standard backprop + parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

    epoch_loss = running_loss / len(train_dataloader)
    if epoch_loss < best_loss:  # keep the checkpoint with the smallest epoch loss
        best_loss = epoch_loss
        torch.save(net.state_dict(), 'best_model_size.pth')
    total_train_step += 1
    print("训练次数:{},loss:{}".format(total_train_step,epoch_loss))
    # NOTE(review): tag "tran_loss" looks like a typo for "train_loss";
    # renaming it would change the TensorBoard tag, so it is left as-is.
    writer.add_scalar("tran_loss",epoch_loss,total_train_step)     # loss curve in TensorBoard

# Evaluation: reload the checkpoint with the smallest training loss and run
# it over the whole test set, tracking the single worst-predicted sample.
best_net = net
best_net.load_state_dict(torch.load("best_model_size.pth"))
total_test_loss = 0
output_save= []
target_save= []
best_net.eval()
with torch.no_grad():
    for j,data in enumerate(test_dataloader):
        imgs, targets = data
        imgs = imgs.to(device)
        targets = targets.to(device)
        outputs = best_net(imgs)

        output_save.append(outputs)
        target_save.append(targets)
        # NOTE(review): unlike the training loop, targets are NOT cast to
        # float32 here — confirm the dataset already yields float32 labels,
        # otherwise MSELoss may see mismatched dtypes.
        loss = loss_fn(outputs,targets)
        max_loss_index, max_loss_value = find_max_loss(j, losses, loss, max_loss_value, max_loss_index)   # track worst-loss sample
        # max_loss_index, max_loss_value = find_min_loss(j, losses, loss, max_loss_value, max_loss_index)     # track best-loss sample
        total_test_loss = total_test_loss + loss*batch_size
# NOTE(review): 684 is the hard-coded test-set size (see MyDataset above).
total_test_loss = total_test_loss/684
print("整体测试集上的loss:{}".format(total_test_loss.item()))  # mean test loss
# writer.add_scalar("test_loss",total_test_loss,total_test_step)
# total_test_step += 1

# Using the crack length of the worst (max-loss) test sample, locate the
# matching scattering matrices in the test set.
# BUG FIX: output_save/target_save hold device tensors; .numpy() raises
# "can't convert cuda:0 device type tensor to numpy" on a GPU, so each
# tensor is moved to the CPU with .cpu() before conversion.
absolute_diff = np.abs(target_test - output_save[max_loss_index].cpu().numpy())
index = np.unravel_index(absolute_diff.argmin(), target_test.shape)
absolute_diff2 = np.abs(target_test - target_save[max_loss_index].cpu().numpy())
index2 = np.unravel_index(absolute_diff2.argmin(), target_test.shape)
# Scattering matrices corresponding to the predicted / true crack length
# of the worst sample (magnitude of the complex-valued matrices).
predic_maxloss_matrix = abs(image_test[index])
real_maxloss_matrix = abs(image_test[index2])
print("index1",index)
print("index2",index2)
# Absolute error between true and predicted crack length for that sample.
size_loss = abs(target_save[max_loss_index].cpu().numpy()-output_save[max_loss_index].cpu().numpy())
print("size_loss=",size_loss)

# NOTE(review): project helper, called with False here vs True before
# training — presumably re-enables gradients so Grad-CAM can backprop;
# confirm its semantics in data_class.
set_parameter_require_grad(best_net,False)

# Grad-CAM activation map for the true-length scattering matrix.
activation_map = cam_pic.gradcam_preprocess(best_net,real_maxloss_matrix)
# cam_pic.result_show(predic_maxloss_matrix,real_maxloss_matrix,activation_map)
# plt.show()

# Second-stage model: a single-channel UNet trained on data derived from
# the first (ResNet) network via project helpers.
unet = unet_model.UNet(1)
unet = unet.to(device)
learn_rate2 = 0.01
optimizer2 = torch.optim.SGD(unet.parameters(),lr=learn_rate2)

# Build the second network's training set from globals populated elsewhere
# (global_var.r_matrix / p_matrix — presumably filled by save_2_matrix
# during the last training epoch; confirm in data_class / global_var).
train_loader2 = create_2_dataset(best_net,global_var.r_matrix,global_var.p_matrix)   # dataset for the second network
second_model.s2_model_train(unet, 3, train_loader2, loss_fn, optimizer2, device)
# Test the second model on the worst sample's real/predicted matrices.
test_loader2 = create_2_dataset(best_net, real_maxloss_matrix, predic_maxloss_matrix, train=False)
second_model.s2_model_test(unet,loss_fn, test_loader2,device)
cam_pic.result_show(predic_maxloss_matrix,real_maxloss_matrix,activation_map)
plt.show()
writer.close()