import torch
import random
import copy
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import numpy as np
import cv2
# Exponential learning-rate decay: lr = base_lr * gamma ** global_step.
def adjust_learning_rate(optimizer, gamma, global_step, base_lr=1e-3):
    """Set every param group's learning rate to base_lr * gamma ** global_step.

    Args:
        optimizer: torch optimizer whose ``param_groups`` are updated in place.
        gamma: multiplicative decay factor applied once per step.
        global_step: number of decay steps already taken.
        base_lr: initial learning rate; defaults to 1e-3, the value that was
            previously hard-coded, so existing callers are unaffected.
    """
    lr = base_lr * (gamma ** global_step)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

# Validation pass for the main (steganalysis) head.
def validation(model, valid_loader, device, criterion, writer, train_step,epoch):
    """Evaluate the main-task head on the validation set.

    Runs the model in eval mode without gradients, averages the main-task
    loss and accuracy per sample, logs both to TensorBoard under
    'valid_accuracy'/'valid_loss' at *train_step*, and returns the accuracy.
    """
    model.to(device)
    model.eval()
    sample_count = 0   # total number of validation images
    correct_sum = 0    # running count of correct main-task predictions
    loss_sum = 0       # running sum of per-batch main-task losses
    with torch.no_grad():
        for images, labels in valid_loader:
            images = images.view(-1, 256, 256, 1).to(device)
            labels = torch.squeeze(labels.view(-1, 1).to(device))
            sample_count += images.shape[0]
            # First output is the steganalysis head; the rest are unused here.
            main_out, _, _, _ = model(images)
            loss_sum += criterion(main_out, labels).item()
            # Predicted class = index of the max logit.
            predictions = torch.max(main_out, dim=1)[1]
            correct_sum += torch.eq(predictions, labels).sum().item()
        avg_acc = correct_sum / sample_count
        avg_loss = loss_sum / sample_count
        writer.add_scalar('valid_accuracy', avg_acc, train_step)
        writer.add_scalar('valid_loss', avg_loss, train_step)
        print('__________currIter: %d || valid_loss: %.6f || valid_acc: %.6f__________' % (train_step, avg_loss, avg_acc))
        #visualize_tsne(model, valid_loader, epoch)
        #print('TSNE DONE')
        return avg_acc


def train(model,
          train_loader,
          valid_loader,
          test_loader,
          optimizer,
          criterion,
          device,
          EPOCHS,
          valid_interval=5000,
          save_interval=5000,
          write_interval=100,
          load_path=None):
    """Multi-task training loop for the steganalysis network.

    Task 1 (main head) detects steganography; task 2 (aux head) detects
    filtering.  Each label row is expected to hold two columns, one per task.
    Per epoch: metrics are logged to TensorBoard, the model is evaluated on
    the validation and test sets, a sample heatmap and metric plots are
    written, and the best-by-validation-accuracy state dict is kept.  Training
    stops early once the last 25 validation accuracies all exceed 0.85 and
    fluctuate by less than 0.01.

    Args:
        model: network whose forward returns (main_out, aux_out, features, input).
        train_loader / valid_loader / test_loader: torch DataLoaders.
        optimizer, criterion: torch optimizer and per-task loss.
        device: device to train on.
        EPOCHS: maximum number of epochs.
        valid_interval, save_interval, write_interval: step intervals
            (only save_interval is currently used — checked once per epoch).
        load_path: optional state-dict path to resume from.
    """
    model.to(device)

    # Running per-epoch sums (reset after each epoch's logging).
    total_loss = 0
    total_loss1 = 0
    total_loss2 = 0
    total_task1_acc = 0
    total_task2_acc = 0
    count = 0  # samples seen this epoch

    lr_adjust_step = 0
    model_save_cnt = 0
    # TensorBoard output directory.
    writer = SummaryWriter(log_dir='3x3_0.4bpp_HUGO')
    # Optionally resume from a previous checkpoint.
    if load_path is not None:
        print("_________load model______________")
        model.load_state_dict(torch.load(load_path))

    # BUGFIX: initialize best_model_state so the save at the bottom can never
    # raise NameError when no validation pass ever improved the accuracy.
    best_model_state = None
    best_model_state_path = None
    highest_accuracy = 0.0
    acc1_history = []
    train_main_losses = []
    train_aux_losses = []
    train_main_accuracies = []
    train_aux_accuracies = []
    test_main_accuracies = []
    converged = False  # early-stopping flag
    train_step = 0

    for epoch in range(1, EPOCHS + 1):
        print('start-epoch:%d||train_step:%d' % (epoch, train_step))
        if converged:  # stop once converged
            break
        for index, (images, labels) in enumerate(train_loader):
            train_step += 1
            images = images.view(-1, 256, 256, 1).to(device)
            # BUGFIX: use the real batch size instead of a hard-coded 64 —
            # the final (smaller) batch of an epoch would otherwise crash.
            labels = labels.view(images.shape[0], -1).to(device)
            labels = torch.squeeze(labels)
            count += images.shape[0]
            optimizer.zero_grad()
            main_task_output, aux_task_output, feature_tmp, inputs = model(images)
            # Predicted class indices for each head (0/1 decisions).
            task1_label = torch.max(main_task_output, dim=1)[1]
            task2_label = torch.max(aux_task_output, dim=1)[1]
            # Column 0 of the labels is the stego label, column 1 the filter label.
            total_task1_acc += torch.eq(task1_label, labels[:, 0]).sum().item()
            total_task2_acc += torch.eq(task2_label, labels[:, 1]).sum().item()
            loss1 = criterion(main_task_output, labels[:, 0])
            loss2 = criterion(aux_task_output, labels[:, 1])
            # Fixed-weight multi-task loss combination.
            loss = 0.9 * loss1 + 0.1 * loss2
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            total_loss1 += loss1.item()
            total_loss2 += loss2.item()

        # ---- per-epoch training metrics (the original `epoch % 1 == 0`
        # guards were always true, so they are dropped) ----
        temp_acc_1 = total_task1_acc / count
        temp_loss1 = total_loss1 / count
        temp_acc_2 = total_task2_acc / count
        temp_loss2 = total_loss2 / count
        print('EPOCH: %d/%d || currIter: %d || train_loss_task1: %.6f || train_acc_task1: %.6f' %
                (epoch, EPOCHS, train_step, temp_loss1, temp_acc_1))
        writer.add_scalar('train_accuracy_task1', temp_acc_1, epoch)
        writer.add_scalar('train_loss_task1', temp_loss1, epoch)
        writer.add_scalar('train_accuracy_task2', temp_acc_2, epoch)
        writer.add_scalar('train_loss_task2', temp_loss2, epoch)
        train_main_losses.append(temp_loss1)
        train_main_accuracies.append(temp_acc_1)
        train_aux_losses.append(temp_loss2)
        train_aux_accuracies.append(temp_acc_2)
        total_loss1 = 0
        total_loss2 = 0
        total_task1_acc = 0
        total_task2_acc = 0
        total_loss = 0
        count = 0

        # ---- validate once per epoch ----
        v_acc1 = validation(model, valid_loader, device, criterion, writer, train_step, epoch)
        acc1_history.append(v_acc1)
        # Convergence: 25 consecutive accuracies > 0.85 whose spread is < 0.01.
        if v_acc1 > 0.85 and len(acc1_history) >= 25 and max(acc1_history[-25:]) - min(acc1_history[-25:]) < 0.01:
            converged = True
            break
        # Track the best model state seen so far.
        if v_acc1 > highest_accuracy:
            highest_accuracy = v_acc1
            best_model_state = copy.deepcopy(model.state_dict())
        test_accuracy = test_acc(model, test_loader, device, criterion, writer, train_step, epoch)
        test_main_accuracies.append(test_accuracy)
        print("______Test Accuracy_____:", test_accuracy)

        # Periodic checkpoint (only triggers when the step count happens to
        # land exactly on a multiple of save_interval at epoch end).
        if train_step % save_interval == 0:
            torch.save(model.state_dict(), './Model_' + str(train_step) + '.pth')
            model_save_cnt += 1

        # Step-decay schedule: drop the lr by 10x at epochs 80 and 120.
        if epoch == 80 or epoch == 120:
            lr_adjust_step += 1
            adjust_learning_rate(optimizer, 0.1, lr_adjust_step)

        # ---- per-epoch visualizations ----
        # Heatmap of the first training sample through layer 8.
        sample_data, _ = next(iter(train_loader))
        sample = sample_data[0]
        sample = sample.view(-1, 256, 256, 1).to(device)
        generate_heatmap(model, sample, epoch, device)
        # Refresh the metric plots.
        plot_metrics(train_main_losses, train_aux_losses,
            train_main_accuracies, train_aux_accuracies,
            test_main_accuracies)

    # After training, persist the best model (if any validation improved).
    if best_model_state is not None:
        best_model_state_path = f'./bestModel_{train_step}_{highest_accuracy:.4f}.pth'
        torch.save(best_model_state, best_model_state_path)
        print(f'Saved best model with accuracy {highest_accuracy:.4f} ')

    writer.close()


def test(model, test_loader, device, criterion,weight_path=None):
    """Load weights from *weight_path* and report main-task loss/accuracy on the test set.

    Raises an AssertionError if *weight_path* is None; returns the average
    accuracy over all test images.
    """
    model.to(device)
    # A checkpoint is mandatory for standalone testing.
    assert weight_path is not None, 'weight_path is None, please change weight_path'
    model.load_state_dict(torch.load(weight_path))
    model.eval()
    sample_count = 0   # total number of test images
    correct_sum = 0    # running count of correct main-task predictions
    loss_sum = 0       # running sum of per-batch main-task losses
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.view(-1, 256, 256, 1).to(device)
            labels = torch.squeeze(labels.view(-1, 1).to(device))
            sample_count += images.shape[0]
            # Only the main (steganalysis) head is scored here.
            main_out, _, _, _ = model(images)
            loss_sum += criterion(main_out, labels).item()
            predictions = torch.max(main_out, dim=1)[1]
            correct_sum += torch.eq(predictions, labels).sum().item()
        avg_acc = correct_sum / sample_count
        avg_loss = loss_sum / sample_count
        print('test_loss: %.6f || test_acc: %.6f' % (avg_loss, avg_acc))
        return avg_acc
#验证
def test_acc(model, test_loader, device, criterion, writer, train_step,epoch):
    cnt = 0  # validation中所有图片的数量
    total_acc = 0  # 所有图片的accuracy的和
    total_loss = 0  # 所有图片的loss的和
    model.to(device)
    #设置为评估模式
    model.eval()
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.view(-1, 256, 256, 1).to(device)
            labels = labels.view(-1, 1).to(device)
            labels = torch.squeeze(labels)
            cnt += images.shape[0]
            #model_output1代表隐写任务的输出
            model_output1,model_output2,feature_tmp,inputs = model(images)
            #计算损失
            loss = criterion(model_output1, labels)
            total_loss += loss.item()
            # 计算accuracy
            model_label = torch.max(model_output1, dim=1)[1]
            #比较任务1输出最大值的索引与隐写任务的标签
            temp_acc = torch.eq(model_label, labels).sum().item()
            total_acc += temp_acc
        #计算平均准确率与损失
        avg_acc = total_acc / cnt
        avg_loss = total_loss / cnt
        writer.add_scalar('test_accuracy', avg_acc, epoch)
        writer.add_scalar('test_loss', avg_loss, epoch)
        print('__________currIter: %d || test_loss: %.6f || test_acc: %.6f__________' % (train_step, avg_loss, avg_acc))
        
        return avg_acc

'''
def generate_heatmap(model, sample_data, epoch,device):
    model.eval()
    with torch.no_grad():
        #print("sample_data.shape",sample_data.shape)
        #adjusted_data = sample_data.permute(1, 2, 0, 3).squeeze(-1)  # [256, 256, 4]
        #adjusted_data = adjusted_data.unsqueeze(0)  # [1, 256, 256, 4]
        #print("adjust_data.shape",adjusted_data.shape)
        _, _, _, orig_img = model(sample_data.to(device))  # 获取原始图像
    
    # 获取第八层激活
    activation = model.activation['layer8'].squeeze()
    
    # 处理激活图
    heatmap = torch.mean(activation, dim=0).cpu().numpy()
    heatmap = np.maximum(heatmap, 0)
    heatmap = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min() + 1e-8)
    
    # 处理原始图像
    img = orig_img.squeeze().cpu().numpy()
    #img = (img - img.min()) / (img.max() - img.min())
    # 如果图像形状是 (C, H, W)，转换为 (H, W, C)
    if img.ndim == 3 and img.shape[0] < img.shape[1] and img.shape[0] < img.shape[2]:
        img = np.transpose(img, (1, 2, 0))
    
    # 归一化图像
    img = (img - img.min()) / (img.max() - img.min())
    
    # 如果仍有多个通道，使用第一个通道或取平均
    if img.ndim == 3:
        if img.shape[2] == 4:  # RGBA图像可直接显示
            pass
        elif img.shape[2] == 3:  # RGB图像可直接显示
            pass
        else:
            # 多通道图像：使用第一个通道作为灰度图
            img = img[..., 0]
    # 创建可视化
    plt.figure(figsize=(15, 6), dpi=100)
    
    # 原始图像
    plt.subplot(1, 3, 1)
    plt.imshow(img, cmap='gray')
    plt.title('Original Image')
    plt.axis('off')
    
    # 激活热力图
    plt.subplot(1, 3, 2)
    plt.imshow(heatmap, cmap='viridis')
    plt.title('Layer8 Activation Map')
    plt.colorbar(fraction=0.046, pad=0.04)
    plt.axis('off')
    
    # 叠加显示
    plt.subplot(1, 3, 3)
    plt.imshow(img, cmap='gray')
    plt.imshow(cv2.resize(heatmap, img.shape), 
              alpha=0.5, 
              cmap='jet',
              interpolation='bicubic')
    plt.title('Overlay Visualization')
    plt.axis('off')
    
    # 保存结果
    plt.savefig(f'./layer8_heatmap_epoch{epoch}.png',
               bbox_inches='tight',
               pad_inches=0.1)
    plt.close()
'''
def generate_heatmap(model, sample_data, epoch, device):
    """Save a 3-panel figure (input, layer-8 activation map, overlay) for one sample.

    Reads the activation cached in ``model.activation['layer8']`` —
    presumably populated by a forward hook in the model; confirm there —
    and writes ``./layer8_heatmap_epoch{epoch}.png``.

    Args:
        model: network whose forward returns (out1, out2, features, input_image)
            and which caches layer-8 activations in ``model.activation``.
        sample_data: one image batch in NHWC layout; a 3-D tensor gets a
            batch dimension prepended.
        epoch: current epoch number, used only in the output filename.
        device: torch device the model input is moved to.
    """
    model.eval()
    # Make sure the input carries a batch dimension (NHWC layout expected).
    if sample_data.dim() == 3:
        sample_data = sample_data.unsqueeze(0)  # add batch dimension
    sample_data = sample_data.to(device)
    
    with torch.no_grad():
        _, _, _, orig_img = model(sample_data)  # 4th output is the original image
    
    # Layer-8 activation; expected to be 4-D [batch_size, C, H, W].
    activation = model.activation['layer8']
    
    # Debug: inspect the raw activation shape before any processing.
    print(f"Activation shape (before processing): {activation.shape}")
    
    # Pick one sample from the batch for the heatmap.
    selected_index = 0  # use the first sample
    activation = activation[selected_index]  # shape becomes [C, H, W]
    
    # Average over channels to collapse the activation into one map.
    heatmap = torch.mean(activation, dim=0).cpu().numpy()  # shape [H, W]
    
    # Clip negatives and min-max normalize to [0, 1] (eps avoids /0).
    heatmap = np.maximum(heatmap, 0)
    heatmap_min = heatmap.min()
    heatmap_max = heatmap.max()
    heatmap = (heatmap - heatmap_min) / (heatmap_max - heatmap_min + 1e-8)
    
    print(f"Heatmap shape: {heatmap.shape}")
    
    # Original image for the same sample; orig_img is [batch_size, C, H, W].
    img = orig_img[selected_index].squeeze().cpu().numpy()  # drop batch dimension
    
    # Convert CHW -> HWC for display (heuristic: channel dim is the smallest).
    if img.ndim == 3 and img.shape[0] < img.shape[1] and img.shape[0] < img.shape[2]:
        img = np.transpose(img, (1, 2, 0))
    
    # Handle multi-channel images.
    if img.ndim == 3:
        if img.shape[-1] in [1, 3, 4]:  # grayscale, RGB or RGBA
            # Collapse a single-channel image to 2-D.
            if img.shape[-1] == 1:
                img = img[..., 0]
        else:
            # Unusual channel counts: fall back to the first channel.
            img = img[..., 0]
    
    # Min-max normalize the image to [0, 1].
    img_min = img.min()
    img_max = img.max()
    img = (img - img_min) / (img_max - img_min + 1e-8)
    
    # The heatmap must be 2-D (H x W) at this point.
    if heatmap.ndim != 2:
        raise ValueError(f"Heatmap should be 2-dimensional (H, W), got {heatmap.ndim} dimensions")
    
    # Image size, used to resize the heatmap for the overlay panel.
    img_height, img_width = img.shape[:2]
    
    # Debug info.
    print(f"Processed image shape: {img.shape}")
    
    # Build the 3-panel figure.
    plt.figure(figsize=(15, 6), dpi=100)
    
    # Panel 1: original image.
    plt.subplot(1, 3, 1)
    plt.imshow(img, cmap='gray')
    plt.title('Original Image')
    plt.axis('off')
    
    # Panel 2: activation heatmap.
    plt.subplot(1, 3, 2)
    plt.imshow(heatmap, cmap='viridis')
    plt.title('Layer8 Activation Map')
    plt.colorbar(fraction=0.046, pad=0.04)
    plt.axis('off')
    
    # Panel 3: heatmap overlaid on the image.
    plt.subplot(1, 3, 3)
    plt.imshow(img, cmap='gray')
    
    # Resize the heatmap to the image size (cv2.resize takes (width, height)).
    resized_heatmap = cv2.resize(heatmap, (img_width, img_height))
    
    plt.imshow(resized_heatmap, 
               alpha=0.5, 
               cmap='jet',
               interpolation='bicubic')
    
    plt.title('Overlay Visualization')
    plt.axis('off')
    
    # Save and close the figure.
    plt.savefig(f'./layer8_heatmap_epoch{epoch}.png',
               bbox_inches='tight',
               pad_inches=0.1)
    plt.close()
    
    print(f"Saved heatmap for epoch {epoch} (batch index {selected_index})")
# Plot the training-loss trend across epochs.
def plot_loss(loss_values):
    """Plot per-epoch training loss and save it as ./loss_plot.png.

    Args:
        loss_values: list of training losses, one entry per epoch.
    """
    plt.plot(loss_values, label='Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training Loss over Epochs')
    plt.legend()
    plt.savefig('./loss_plot.png')
    plt.close()

def plot_test_accuracy(accuracy_values):
    """Plot per-epoch test accuracy and save it as ./test_accuracy_plot.png.

    Args:
        accuracy_values: list of test accuracies, one entry per epoch.
    """
    plt.plot(accuracy_values, label='Test Accuracy')
    plt.grid(True)  # grid makes the trend easier to read
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.title('Test Accuracy over Epochs')
    plt.legend()
    plt.savefig('./test_accuracy_plot.png')
    plt.close()  # close so later plots do not overlap this figure

def plot_metrics(train_main_loss, train_aux_loss, 
                train_main_acc, train_aux_acc,
                test_main_acc):
    """Plot per-epoch training losses/accuracies and test accuracy.

    Writes a 2x2 panel figure to ./training_metrics.png: training losses
    (main + aux), training accuracies (main + aux), and test accuracy.

    Args:
        train_main_loss / train_aux_loss: per-epoch losses for each task.
        train_main_acc / train_aux_acc: per-epoch accuracies for each task.
        test_main_acc: per-epoch main-task test accuracies.
    """
    plt.figure(figsize=(15,10))
    
    # Loss curves
    plt.subplot(2,2,1)
    plt.plot(train_main_loss, label='Main Task Loss')
    plt.plot(train_aux_loss, label='Aux Task Loss')
    plt.title('Training Loss')
    plt.legend()
    
    # Training accuracy
    plt.subplot(2,2,2)
    plt.plot(train_main_acc, label='Main Task Acc')
    plt.plot(train_aux_acc, label='Aux Task Acc')
    plt.title('Training Accuracy')
    plt.legend()

    # BUGFIX: test_main_acc was accepted but never plotted.
    plt.subplot(2,2,3)
    plt.plot(test_main_acc, label='Test Main Acc')
    plt.title('Test Accuracy')
    plt.legend()
    
    plt.tight_layout()
    plt.savefig('./training_metrics.png')
    plt.close()
    
def visualize_tsne(model, dataloader, epoch):
    """Project the main-task features to 2-D with t-SNE and save a scatter plot.

    Collects the third model output (feature vectors) for every batch in
    *dataloader*, reduces them to 2-D with t-SNE, colors points by label,
    and writes ./lwe_test_layer8_mtlsuni0.4/tsne_epoch{epoch}.png.

    Args:
        model: network whose forward returns (out1, out2, features, input_image).
        dataloader: iterable of (data, target) batches.
        epoch: epoch number, used in the plot title and filename.
    """
    import os  # local import: only needed to ensure the output directory exists
    model.eval()
    features = []
    labels = []
    
    with torch.no_grad():
        for data, target in dataloader:
            # Collapse an accidental extra dimension (e.g. N x 1 x H x W x C).
            if data.dim() == 5: 
                data = data.squeeze(dim=1)
            _, _, feat, inputimage = model(data)
            features.append(feat.cpu())
            labels.append(target.cpu())
    
    features = torch.cat(features).numpy()
    labels = torch.cat(labels).numpy()
    
    # t-SNE dimensionality reduction (fixed seed for reproducibility).
    tsne = TSNE(n_components=2, random_state=42)
    projections = tsne.fit_transform(features)
    
    plt.figure(figsize=(10,10))
    plt.scatter(projections[:,0], projections[:,1], 
                c=labels, cmap='tab10', alpha=0.6)
    plt.colorbar()
    plt.title(f't-SNE Visualization (Epoch {epoch})')
    out_dir = './lwe_test_layer8_mtlsuni0.4'
    # BUGFIX: savefig raises FileNotFoundError when the directory is missing.
    os.makedirs(out_dir, exist_ok=True)
    plt.savefig(f'{out_dir}/tsne_epoch{epoch}.png')
    plt.close()
