import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
from torch.optim.lr_scheduler import StepLR
import time
import matplotlib.colors as mcolors
# from torchviz import make_dot
# import os
#
#
# #
# os.environ["PATH"] += os.pathsep + r"E:\Graphviz\bin"


# 定义 CNN 模型
class CNNModel(nn.Module):
    """U-Net-style encoder/decoder CNN.

    Maps a single-channel field of shape (B, 1, H, W) to a six-channel
    field of shape (B, 6, H, W). Encoder features are carried over to the
    decoder through channel-wise concatenation (skip connections); the
    decoder resizes each upsampled map to the matching encoder resolution,
    so odd spatial sizes (e.g. 101 x 51) are handled correctly.
    """

    def __init__(self):
        super(CNNModel, self).__init__()

        # Encoder: four conv+BN stages widening 1 -> 64 -> 128 -> 256 -> 512.
        self.conv1 = nn.Conv2d(1, 64, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(64)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(128)
        self.conv3 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.bn3 = nn.BatchNorm2d(256)
        self.conv4 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.bn4 = nn.BatchNorm2d(512)

        # Decoder: transposed convs upsample 2x; each conv fuses the
        # concatenated skip (hence the doubled input channel counts).
        self.upconv1 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
        self.conv5 = nn.Conv2d(512, 256, kernel_size=3, padding=1)
        self.bn5 = nn.BatchNorm2d(256)

        self.upconv2 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
        self.conv6 = nn.Conv2d(256, 128, kernel_size=3, padding=1)
        self.bn6 = nn.BatchNorm2d(128)

        self.upconv3 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
        self.conv7 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
        self.bn7 = nn.BatchNorm2d(64)

        # 1x1 projection down to the 6 output channels.
        self.final_conv = nn.Conv2d(64, 6, kernel_size=1)

    def _decode_stage(self, up, conv, bn, deep, skip):
        """One decoder stage: upsample `deep`, bilinearly resize it to the
        spatial size of `skip`, concatenate (skip first), then conv+BN+ReLU."""
        upsampled = F.interpolate(up(deep), size=skip.shape[2:],
                                  mode='bilinear', align_corners=False)
        return F.relu(bn(conv(torch.cat((skip, upsampled), dim=1))))

    def forward(self, x):
        # Encoder path; each stage after the first halves H and W via max-pool.
        enc1 = F.relu(self.bn1(self.conv1(x)))                        # (B, 64,  H,   W)
        enc2 = F.relu(self.bn2(self.conv2(F.max_pool2d(enc1, 2))))    # (B, 128, H/2, W/2)
        enc3 = F.relu(self.bn3(self.conv3(F.max_pool2d(enc2, 2))))    # (B, 256, H/4, W/4)
        enc4 = F.relu(self.bn4(self.conv4(F.max_pool2d(enc3, 2))))    # (B, 512, H/8, W/8)

        # Decoder path, fusing the skip connection at each resolution.
        dec = self._decode_stage(self.upconv1, self.conv5, self.bn5, enc4, enc3)
        dec = self._decode_stage(self.upconv2, self.conv6, self.bn6, dec, enc2)
        dec = self._decode_stage(self.upconv3, self.conv7, self.bn7, dec, enc1)

        # (B, 6, H, W)
        return self.final_conv(dec)

def train_model(model, train_dataloader, criterion, optimizer, num_epochs=10,
                scheduler=None, print_gap=None):
    """Train `model` on `train_dataloader` for `num_epochs` epochs.

    Fix: the original body referenced the module-level globals `scheduler`
    and `train_print_gap`, so calling it from anywhere but this script's
    Mode==1 branch raised NameError. Both are now explicit keyword
    parameters; when omitted they fall back to those globals (if defined)
    so existing positional callers keep their exact behavior.

    Args:
        model: the nn.Module to train (moved to the right device by caller).
        train_dataloader: yields (inputs, targets) batches.
        criterion: loss function, e.g. nn.MSELoss().
        optimizer: torch optimizer updating model.parameters().
        num_epochs: number of passes over the dataset.
        scheduler: optional LR scheduler; stepped once per *batch*, as the
            original code did. None -> fall back to global `scheduler`.
        print_gap: print the epoch loss every `print_gap` epochs.
            None -> fall back to global `train_print_gap`, else 10.
    """
    model.train()  # enable training mode (BatchNorm uses batch stats)
    sched = scheduler if scheduler is not None else globals().get("scheduler")
    gap = print_gap if print_gap is not None else globals().get("train_print_gap", 10)

    for epoch in range(num_epochs):
        running_loss = 0.0
        for inputs, targets in train_dataloader:
            optimizer.zero_grad()          # clear gradients from previous step
            outputs = model(inputs)        # forward pass, (B, C, H, W)
            loss = criterion(outputs, targets)
            loss.backward()                # backpropagate
            optimizer.step()               # update parameters
            if sched is not None:
                sched.step()               # NOTE: per-batch stepping, as before
            # weight by batch size so the epoch average is per-sample
            running_loss += loss.item() * inputs.size(0)

        epoch_loss = running_loss / len(train_dataloader.dataset)
        if epoch % gap == 0:
            print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {epoch_loss:.4f}")


def _plot_comparison(true_img, pred_img, error, save_path=None):
    """Draw the true vs. predicted 2-D field side by side with a shared colorbar.

    Args:
        true_img: 2-D numpy array, ground-truth field.
        pred_img: 2-D numpy array, predicted field.
        error: relative error to annotate on the figure.
        save_path: if None, show the figure interactively; otherwise save it
            to this path and close the figure.
    """
    fig, axes = plt.subplots(1, 2, figsize=(12, 6))
    # Shared normalization so both panels (and the colorbar) use one scale.
    vmin = min(true_img.min(), pred_img.min())
    vmax = max(true_img.max(), pred_img.max())
    norm = mcolors.Normalize(vmin=vmin, vmax=vmax)
    im0 = axes[0].imshow(true_img, cmap='viridis', norm=norm)
    axes[0].set_title('True Output')
    axes[1].imshow(pred_img, cmap='viridis', norm=norm)
    axes[1].set_title('CNN Predicted Output')
    plt.text(1.5, 110, f"R_error:{error}", fontsize=12, color='red')
    # Single colorbar for both panels: [left, bottom, width, height].
    plt.subplots_adjust(wspace=0.01)
    cbar_ax = fig.add_axes([0.9, 0.15, 0.02, 0.7])
    fig.colorbar(im0, cax=cbar_ax)
    if save_path is None:
        plt.show()
    else:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()  # close instead of showing


def _relative_error(true_img, pred_img):
    """Relative Frobenius-norm error ||T - P|| / ||T|| of two 2-D arrays.

    Equivalent to the original flatten-to-(1, 101*51) + ord='fro' computation,
    but works for any grid size.
    """
    t = torch.from_numpy(true_img)
    p = torch.from_numpy(pred_img)
    return torch.linalg.norm(t - p) / torch.linalg.norm(t)


def test_model(model, test_dataloader, criterion):
    """Evaluate `model` on `test_dataloader` and return the mean test loss.

    Side effects: prints the test loss and the overall relative test error,
    shows comparison plots for the last `m` samples (channel 0) of the FINAL
    batch, and saves the plot with the smallest relative error to disk.

    Fixes vs. the original:
    - the saved "best" figure previously annotated the LAST sample's error
      instead of the minimum error;
    - the duplicated ~35-line plotting code is now one helper;
    - the hard-coded 101*51 reshape is gone (same norm value, any grid);
    - `m` is clamped to the final batch size so indices cannot wrap around.
    """
    model.eval()  # evaluation mode (BatchNorm uses running stats)
    total_loss = 0.0
    all_targets = []  # accumulated ground truth over all batches
    all_outputs = []  # accumulated predictions over all batches
    m = 10  # number of trailing samples of the final batch to visualize
    all_relative_test_error = []  # per-sample errors, used to pick the best plot
    output_path = "../figure/example.png"  # where the best figure is saved

    with torch.no_grad():  # no gradients needed for evaluation
        for inputs, targets in test_dataloader:
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            total_loss += loss.item() * inputs.size(0)
            all_targets.append(targets.cpu().numpy())
            all_outputs.append(outputs.cpu().numpy())

        test_loss = total_loss / len(test_dataloader.dataset)
        print(f"Test Loss: {test_loss:.4f}")

        # NOTE: `outputs`/`targets` still hold the FINAL batch here; the
        # plots below deliberately use that batch, as the original did.
        print(f"output_batch:{outputs.shape[0]}")
        m = min(m, outputs.shape[0])  # guard against a short final batch
        for idx in range(m):
            batch_idx = outputs.shape[0] - idx - 1  # walk backwards from the end
            sample_output = targets[batch_idx, 0, :, :].cpu().numpy()
            sample_output_pred = outputs[batch_idx, 0, :, :].cpu().numpy()
            relative_test_error = _relative_error(sample_output, sample_output_pred)
            all_relative_test_error.append(relative_test_error)
            _plot_comparison(sample_output, sample_output_pred, relative_test_error)

        # Save the single figure with the smallest relative error.
        min_error_index = int(np.argmin(all_relative_test_error))
        min_error = all_relative_test_error[min_error_index]
        batch_idx = outputs.shape[0] - min_error_index - 1
        sample_output = targets[batch_idx, 0, :, :].cpu().numpy()
        sample_output_pred = outputs[batch_idx, 0, :, :].cpu().numpy()
        # Annotate with the *minimum* error (the original mistakenly reused
        # the last loop iteration's error here).
        _plot_comparison(sample_output, sample_output_pred, min_error,
                         save_path=output_path)

    # Overall relative test error over the whole test set.
    all_targets = np.concatenate(all_targets, axis=0)
    all_outputs = np.concatenate(all_outputs, axis=0)
    relative_test_error = np.sqrt(np.sum((all_outputs - all_targets) ** 2)) / np.sqrt(
        np.sum(all_targets ** 2))
    print(f"Relative Test Error: {relative_test_error:.4e}")

    return test_loss


if __name__ == "__main__":
    # Mode: 1 = train a fresh model and save it; anything else = load the
    # saved model from disk and only evaluate.
    Mode = 0
    Model_type_name = "train model" if Mode == 1 else "load model"
    print(Model_type_name)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Hyperparameters.
    batch_size = 64
    lr = 0.01
    num_epochs = 1000
    train_print_gap = 10  # print training loss every N epochs (read by train_model)

    # File paths. Raw strings are required: "\m" and "\s" in a plain string
    # are invalid escape sequences (SyntaxWarning on modern Python, slated
    # to become a SyntaxError).
    input_file = r"..\data\input_1800.npy"
    output_file = r"..\data\interpolate_output_1800_surface.npy"
    model_path = r"..\model\strain_cnn_model.pth"

    # Load inputs and add a channel axis: (N, H, W) -> (N, 1, H, W),
    # e.g. (1800, 1, 101, 51).
    input_data = np.load(input_file)
    input_shape = input_data.shape
    input_data = input_data.reshape(input_shape[0], 1, input_shape[1], input_shape[2])
    input_data = torch.tensor(input_data)

    # Scale targets by `prop` so their magnitude is closer to O(1).
    prop = 1e6
    output_data = np.load(output_file, allow_pickle=True) * prop
    # Channels-last -> channels-first: (N, H, W, C) -> (N, C, H, W),
    # e.g. (1800, 6, 101, 51).
    output_data = torch.tensor(output_data).permute(0, 3, 1, 2)

    # Train / val / test split (80% / 0% / 20%); val is currently unused.
    train_size = int(0.8 * input_shape[0])
    val_size = int(0.0 * input_shape[0])
    test_size = input_shape[0] - train_size - val_size

    train_input = input_data[:train_size].float().to(device)
    train_target = output_data[:train_size].float().to(device)
    test_input = input_data[train_size + val_size:train_size + test_size + val_size].float().to(device)
    test_target = output_data[train_size + val_size:train_size + test_size + val_size].float().to(device)

    train_dataset = TensorDataset(train_input, train_target)
    test_dataset = TensorDataset(test_input, test_target)

    train_dataloader = DataLoader(train_dataset, batch_size, shuffle=True)
    test_dataloader = DataLoader(test_dataset, batch_size, shuffle=False)

    model = CNNModel().to(device)

    if Mode == 1:
        start_time = time.time()
        criterion = nn.MSELoss()  # mean squared error on the 6-channel field
        optimizer = optim.Adam(model.parameters(), lr)
        # NOTE(review): train_model steps this scheduler once per *batch*,
        # so with ~23 batches/epoch the LR decays far faster than "every
        # 500 epochs" — confirm the intended stepping granularity.
        scheduler = StepLR(optimizer, step_size=500, gamma=0.8)
        train_model(model, train_dataloader, criterion, optimizer, num_epochs)
        train_end_time = time.time()
        print(f"training the model costs {train_end_time - start_time:.4f}s")
        torch.save(model.state_dict(), model_path)
        test_model(model, test_dataloader, criterion)
    else:
        model.load_state_dict(torch.load(model_path))
        criterion = nn.MSELoss()
        test_model(model, test_dataloader, criterion)