"""GANomaly
"""
# pylint: disable=C0301,E1101,W0622,C0103,R0902,R0915

##
from collections import OrderedDict
import os
import time
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt

from torch.autograd import Variable
import torch.optim as optim
import torch.nn as nn
import torch.utils.data
import torchvision.utils as vutils

from lib.networks import NetG, NetD, weights_init
from lib.visualizer import Visualizer
from lib.loss import l2_loss, semi_supervised_info_nce_loss
from lib.evaluate import evaluate


class BaseModel():
    """ Base Model for ganomaly
    """

    def __init__(self, opt, dataloader):
        ##
        # Seed all RNGs first so everything constructed below is
        # deterministic when a manual seed was requested.
        self.seed(opt.manualseed)

        # Core configuration, data and output locations.
        self.opt = opt
        self.dataloader = dataloader
        self.trn_dir = os.path.join(self.opt.outf, self.opt.name, 'train')
        self.tst_dir = os.path.join(self.opt.outf, self.opt.name, 'test')
        self.device = torch.device("cuda:0" if self.opt.device != 'cpu' else "cpu")
        self.visualizer = Visualizer(opt, model=self, dataloader=dataloader, device=self.device)

        # Per-epoch loss recorder: the base GANomaly losses are always
        # tracked; optional terms are added according to the enabled options.
        self.loss_history = {key: [] for key in
                             ('err_d', 'err_g', 'err_g_adv', 'err_g_con', 'err_g_enc')}
        if self.opt.use_semi:
            self.loss_history['err_cls'] = []
        if self.opt.use_contrast:
            self.loss_history['err_contrast'] = []
        if self.opt.use_latent_reg:
            self.loss_history['err_latent_reg'] = []
        if self.opt.use_push:
            self.loss_history['err_push'] = []
        if self.opt.use_hlfd:
            # Low-/high-frequency reconstruction loss components.
            self.loss_history['err_g_con_low'] = []
            self.loss_history['err_g_con_high'] = []

        # Per-epoch AUC tracking (best_auc is updated during training).
        self.performance_history = {'auc': [], 'best_auc': 0}

    ##
    def set_input(self, input: torch.Tensor):
        """ Set input and ground truth

        Args:
            input (FloatTensor): Input data for batch i.
        """
        with torch.no_grad():
            self.input.resize_(input[0].size()).copy_(input[0])
            self.gt.resize_(input[1].size()).copy_(input[1])
            self.label.resize_(input[1].size())

            # Copy the first batch as the fixed input.
            if self.total_steps == self.opt.batchsize:
                self.fixed_input.resize_(input[0].size()).copy_(input[0])

    ##
    def seed(self, seed_value):
        """ Seed

        Arguments:
            seed_value {int} -- [description]
        """
        # Check if seed is default value
        if seed_value == -1:
            return

        # Otherwise seed all functionality
        import random
        random.seed(seed_value)
        torch.manual_seed(seed_value)
        torch.cuda.manual_seed_all(seed_value)
        np.random.seed(seed_value)
        torch.backends.cudnn.deterministic = True

    ##
    def get_errors(self):
        """ Get netD and netG errors.

        Returns:
            [OrderedDict]: Dictionary containing errors.
        """

        errors = OrderedDict([
            ('err_d', self.err_d.item()),
            ('err_g', self.err_g.item()),
            ('err_g_adv', self.err_g_adv.item()),
            ('err_g_con', self.err_g_con.item()),
            ('err_g_enc', self.err_g_enc.item())])

        # 添加新增的损失分量
        if hasattr(self, 'err_contrast'):
            errors['err_contrast'] = self.err_contrast.item() if self.err_contrast != 0 else 0.0
        if hasattr(self, 'err_push'):
            errors['err_push'] = self.err_push.item() if self.err_push != 0 else 0.0
        if hasattr(self, 'err_latent_reg'):
            errors['err_latent_reg'] = self.err_latent_reg.item() if self.err_latent_reg != 0 else 0.0
        # 添加高低频损失
        if hasattr(self, 'err_g_con_low'):
            errors['err_g_con_low'] = self.err_g_con_low.item() if self.err_g_con_low != 0 else 0.0
        if hasattr(self, 'err_g_con_high'):
            errors['err_g_con_high'] = self.err_g_con_high.item() if self.err_g_con_high != 0 else 0.0


        # 半监督模式下添加分类损失日志
        if self.opt.use_semi:
            errors['err_cls'] = self.err_cls.item()
        return errors

    ##
    def get_current_images(self):
        """ Returns current images.

        Returns:
            [reals, fakes, fixed]
        """

        reals = self.input.data
        fakes = self.fake.data
        fixed = self.netg(self.fixed_input)[0].data

        return reals, fakes, fixed

    ##
    def save_weights(self, epoch):
        """Save netG and netD weights for the current epoch.

        Args:
            epoch ([int]): Current epoch number.
        """

        weight_dir = os.path.join(self.opt.outf, self.opt.name, 'train', 'weights')
        if not os.path.exists(weight_dir): os.makedirs(weight_dir)

        torch.save({'epoch': epoch + 1, 'state_dict': self.netg.state_dict()},
                   '%s/netG.pth' % (weight_dir))
        torch.save({'epoch': epoch + 1, 'state_dict': self.netd.state_dict()},
                   '%s/netD.pth' % (weight_dir))

        # 保存训练配置
        config_path = os.path.join(weight_dir, 'training_config.txt')
        with open(config_path, 'w') as f:
            f.write(f"Best AUC: {self.performance_history['best_auc']:.4f}\n")
            f.write(f"Best Epoch: {epoch + 1}\n")
            f.write(f"Learning Rate: {self.opt.lr}\n")
            # f.write(f"Weight Decay: {self.opt.get('weight_decay', 1e-5)}\n")



    ##
    def train_one_epoch(self):
        """ Train the model for one epoch.

        Accumulates every tracked loss over the epoch, prints the epoch
        means, periodically saves sample/reconstruction images, and applies
        a discriminator re-initialisation heuristic when adversarial
        training collapses.
        """
        self.netg.train()
        epoch_iter = 0
        train_recon_dir = os.path.join(self.opt.outf, self.opt.name, 'train', 'reconstructions')
        os.makedirs(train_recon_dir, exist_ok=True)

        # Helper for real-vs-reconstruction comparison grids; hoisted out of
        # the batch loop (the import is loop-invariant).
        from lib.interpretability import plot_reconstruction_comparison

        # Per-epoch loss accumulators (mirrors the keys of loss_history).
        epoch_losses = {
            'err_d': 0.0,
            'err_g': 0.0,
            'err_g_adv': 0.0,
            'err_g_con': 0.0,
            'err_g_enc': 0.0,
        }
        if self.opt.use_push:
            epoch_losses['err_push'] = 0.0
        if self.opt.use_latent_reg:
            epoch_losses['err_latent_reg'] = 0.0
        if self.opt.use_contrast:
            epoch_losses['err_contrast'] = 0.0
        if self.opt.use_semi:
            epoch_losses['err_cls'] = 0.0
        if self.opt.use_hlfd:
            # High-/low-frequency reconstruction loss terms.
            epoch_losses['err_g_con_low'] = 0.0
            epoch_losses['err_g_con_high'] = 0.0
        batch_count = 0

        for data in tqdm(self.dataloader['train'], leave=False, total=len(self.dataloader['train'])):
            self.total_steps += self.opt.batchsize
            epoch_iter += self.opt.batchsize
            batch_count += 1

            self.set_input(data)
            self.optimize_params()

            # Accumulate this batch's losses.
            errors = self.get_errors()
            for key in epoch_losses.keys():
                epoch_losses[key] += errors[key]

            # Periodic loss plotting.
            if self.total_steps % self.opt.print_freq == 0:
                if self.opt.display:
                    counter_ratio = float(epoch_iter) / len(self.dataloader['train'].dataset)
                    self.visualizer.plot_current_errors(self.epoch, counter_ratio, errors)

            # Periodic image dumps plus a real-vs-fake comparison grid.
            if self.total_steps % self.opt.save_image_freq == 0:
                reals, fakes, fixed = self.get_current_images()
                self.visualizer.save_current_images(self.epoch, reals, fakes, fixed)
                plot_reconstruction_comparison(
                    real=reals,
                    fake=fakes,
                    save_path=os.path.join(train_recon_dir, f"recon_epoch{self.epoch}_step{self.total_steps}.png"),
                    n=5  # number of samples shown
                )

                if self.opt.display:
                    self.visualizer.display_current_images(reals, fakes, fixed)

        # Epoch means of every accumulated loss; extend the loss history.
        avg_losses = {}
        for key in epoch_losses.keys():
            avg_losses[key] = epoch_losses[key] / batch_count
            self.loss_history[key].append(avg_losses[key])

        # Report epoch-average losses.
        print(f"\nEpoch {self.epoch + 1}/{self.opt.niter} Average Losses:")
        print(f"  Discriminator Loss (err_d): {avg_losses['err_d']:.6f}")
        print(f"  Generator Total Loss (err_g): {avg_losses['err_g']:.6f}")
        print(f"    - Base Components:")
        print(f"      * Adversarial Loss (err_g_adv): {avg_losses['err_g_adv']:.6f}")
        print(f"      * Reconstruction Loss (err_g_con): {avg_losses['err_g_con']:.6f}")
        print(f"      * Encoder Loss (err_g_enc): {avg_losses['err_g_enc']:.6f}")
        # BUGFIX: the optional components exist in avg_losses only when the
        # corresponding option is enabled; unconditional access raised
        # KeyError when use_hlfd / use_contrast was off.
        if self.opt.use_hlfd:
            print(f"    - Frequency Components:")
            print(f"      * Low Frequency Loss: {avg_losses['err_g_con_low']:.6f}")
            print(f"      * High Frequency Loss: {avg_losses['err_g_con_high']:.6f}")
        if self.opt.use_contrast:
            print(f"    - Enhanced Components:")
            print(f"      * Contrastive Loss (err_contrast): {avg_losses['err_contrast']:.6f}")
        if self.opt.use_semi:
            print(f"  Classification Loss (err_cls): {avg_losses['err_cls']:.6f}")
        print(">> Training model %s. Epoch %d/%d" % (self.name, self.epoch + 1, self.opt.niter))

        # Visualise high/low-frequency separation every 10 epochs.
        if (self.epoch + 1) % 10 == 0:
            self.visualizer.visualize_freq_separation(self.epoch + 1)

        # ======== End-of-epoch discriminator re-initialisation logic ========
        # When the discriminator collapses (near-zero loss) or the adversarial
        # signal vanishes late in training, re-initialise netD.
        if hasattr(self, 'last_reinit_epoch'):
            epochs_since_reinit = self.epoch - self.last_reinit_epoch
        else:
            # First check: initialise bookkeeping so a re-init is allowed.
            self.last_reinit_epoch = -10
            epochs_since_reinit = 0
            self.reinit_count = 0
        avg_err_d = avg_losses['err_d']
        avg_err_g_adv = avg_losses['err_g_adv']
        if (avg_err_d < 1e-4 or (avg_err_g_adv < 0.01 and self.epoch > 50)):
            # Enforce a minimum gap of 5 epochs between re-initialisations.
            if epochs_since_reinit >= 5:
                self.reinit_d()
                self.last_reinit_epoch = self.epoch
                self.reinit_count += 1
                # Slightly lower the discriminator LR after each re-init.
                for param_group in self.optimizer_d.param_groups:
                    param_group['lr'] = self.opt.lr * 0.6
                print(f"\n⚠️  Epoch {self.epoch + 1}: 判别器重初始化 (累计次数: {self.reinit_count})")
                print(f"   触发原因: err_d={avg_err_d:.6f}, err_g_adv={avg_err_g_adv:.6f}")
                print(f"   新学习率: {self.opt.lr * 0.6:.6e}")

        # Step the LR schedulers when present.
        if hasattr(self, 'scheduler_g'):
            self.scheduler_g.step()
        if hasattr(self, 'scheduler_d'):
            self.scheduler_d.step()

    ##
    def train(self):
        """ Train the model.

        Runs one epoch of training followed by a full evaluation per epoch,
        keeps the checkpoint of the best-performing epoch, and records the
        per-epoch metric so the final loss/performance curves show real data.
        """
        ##
        # TRAIN
        self.total_steps = 0
        best_auc = 0

        # Train for niter epochs.
        print(">> Training model %s." % self.name)
        for self.epoch in range(self.opt.iter, self.opt.niter):
            # Train for one epoch, then evaluate.
            self.train_one_epoch()
            res = self.test()

            # BUGFIX: record per-epoch performance. Previously
            # performance_history was never updated, so plot_loss_curve
            # received an empty AUC series and save_weights always wrote
            # "Best AUC: 0.0000" into training_config.txt.
            self.performance_history['auc'].append(res[self.opt.metric])
            if res[self.opt.metric] > best_auc:
                best_auc = res[self.opt.metric]
                # Update best_auc before saving so the config file is correct.
                self.performance_history['best_auc'] = best_auc
                self.save_weights(self.epoch)
            self.visualizer.print_current_performance(res, best_auc)

        # Final loss/performance curves after training completes.
        self.visualizer.plot_loss_curve(self.loss_history, self.performance_history)
        print(f">> Loss curve saved to: {os.path.join(self.opt.outf, self.opt.name, 'train', 'loss_plots')}")
        print(">> Training model %s.[Done]" % self.name)

    def test(self):
        """ Test GANomaly model with enhanced anomaly scoring and analysis.

        Fuses four normalised per-sample error components (latent distance,
        reconstruction error, discriminator feature-matching error and
        discriminator confidence) into one anomaly score, evaluates the AUC
        of the fused score and of each component, and writes detailed
        metrics and optional visualisations under the test output directory.

        Returns:
            [OrderedDict]: runtime and per-component AUC metrics.
        """
        with torch.no_grad():
            # Optionally load pre-trained generator weights.
            if self.opt.load_weights:
                path = "./output/{}/{}/train/weights/netG.pth".format(self.name.lower(), self.opt.dataset)
                pretrained_dict = torch.load(path, map_location=self.device)['state_dict']
                try:
                    self.netg.load_state_dict(pretrained_dict)
                except IOError:
                    raise IOError("netG weights not found")
                print('   Loaded weights.')

            self.opt.phase = 'test'
            self.netg.eval()  # evaluation mode

            # Pre-allocate per-sample buffers.
            dataset_size = len(self.dataloader['test'].dataset)
            self.an_scores = torch.zeros(size=(dataset_size,), dtype=torch.float32, device=self.device)
            self.gt_labels = torch.zeros(size=(dataset_size,), dtype=torch.long, device=self.device)
            self.latent_i = torch.zeros(size=(dataset_size, self.opt.nz), dtype=torch.float32, device=self.device)
            self.latent_o = torch.zeros(size=(dataset_size, self.opt.nz), dtype=torch.float32, device=self.device)

            # Per-component diagnostics: reconstruction error, latent error,
            # feature-matching error and discriminator confidence.
            self.recon_errors = torch.zeros(size=(dataset_size,), dtype=torch.float32, device=self.device)
            self.latent_errors = torch.zeros(size=(dataset_size,), dtype=torch.float32, device=self.device)
            self.feature_errors = torch.zeros(size=(dataset_size,), dtype=torch.float32, device=self.device)
            self.confidence_scores = torch.zeros(size=(dataset_size,), dtype=torch.float32, device=self.device)

            # Samples collected for qualitative visualisation.
            vis_normal_imgs = []
            vis_normal_fakes = []
            vis_anomaly_imgs = []
            vis_anomaly_fakes = []
            vis_max_samples = 5

            self.times = []
            self.total_steps = 0

            # Iterate over the test set.
            for i, data in enumerate(self.dataloader['test'], 0):
                time_i = time.time()
                self.set_input(data)

                # Generator forward pass.
                self.fake, latent_i, latent_o = self.netg(self.input)
                latent_i = latent_i.squeeze()
                latent_o = latent_o.squeeze()

                # Discriminator features and predictions for real and fake.
                pred_real, feat_real, _ = self.netd(self.input)
                pred_fake, feat_fake, _ = self.netd(self.fake)

                # 1. Per-sample error components.
                # Latent-space error (L2 distance).
                latent_error = torch.mean(torch.pow(latent_i - latent_o, 2), dim=1)

                # Reconstruction error (L1 + L2 mix).
                l1_recon = torch.mean(torch.abs(self.fake - self.input), dim=[1, 2, 3])
                l2_recon = torch.mean(torch.pow(self.fake - self.input, 2), dim=[1, 2, 3])
                recon_error = 0.7 * l1_recon + 0.3 * l2_recon  # combined reconstruction error

                # Feature-matching error in discriminator feature space.
                feat_error = torch.mean(torch.pow(feat_real - feat_fake, 2), dim=[1, 2, 3])

                # Discriminator confidence (farther from 0.5 = more anomalous).
                confidence = torch.abs(pred_fake - 0.5).squeeze()

                # 2. Fuse components after per-batch min-max normalisation.
                latent_error_norm = (latent_error - latent_error.min()) / (
                            latent_error.max() - latent_error.min() + 1e-8)
                recon_error_norm = (recon_error - recon_error.min()) / (recon_error.max() - recon_error.min() + 1e-8)
                feat_error_norm = (feat_error - feat_error.min()) / (feat_error.max() - feat_error.min() + 1e-8)
                confidence_norm = (confidence - confidence.min()) / (confidence.max() - confidence.min() + 1e-8)

                # Fusion weights: overridable via self.score_weights.
                if hasattr(self, 'score_weights'):
                    weights = self.score_weights
                else:
                    weights = {'latent': 0.4, 'recon': 0.3, 'feat': 0.2, 'confidence': 0.1}

                # Final fused anomaly score.
                final_error = (weights['latent'] * latent_error_norm +
                               weights['recon'] * recon_error_norm +
                               weights['feat'] * feat_error_norm +
                               weights['confidence'] * confidence_norm)

                time_o = time.time()

                # BUGFIX: index with the *configured* batch size. The old
                # code used i * batch_size with the current batch's size,
                # which misplaced the final (possibly smaller) batch and
                # corrupted the stored scores/labels.
                batch_size = self.input.size(0)
                batch_start = i * self.opt.batchsize
                batch_end = batch_start + batch_size

                # Store all per-sample scores for this batch.
                self.an_scores[batch_start:batch_end] = final_error
                self.recon_errors[batch_start:batch_end] = recon_error
                self.latent_errors[batch_start:batch_end] = latent_error
                self.feature_errors[batch_start:batch_end] = feat_error
                self.confidence_scores[batch_start:batch_end] = confidence
                self.gt_labels[batch_start:batch_end] = self.gt
                self.latent_i[batch_start:batch_end, :] = latent_i
                self.latent_o[batch_start:batch_end, :] = latent_o
                self.times.append(time_o - time_i)

                # Collect a few normal/anomalous samples for visualisation.
                if len(vis_normal_imgs) < vis_max_samples or len(vis_anomaly_imgs) < vis_max_samples:
                    for j in range(batch_size):
                        label = self.gt[j].item()
                        if label == 0 and len(vis_normal_imgs) < vis_max_samples:
                            vis_normal_imgs.append(self.input[j].detach())
                            vis_normal_fakes.append(self.fake[j].detach())
                        elif label == 1 and len(vis_anomaly_imgs) < vis_max_samples:
                            vis_anomaly_imgs.append(self.input[j].detach())
                            vis_anomaly_fakes.append(self.fake[j].detach())
                        if len(vis_normal_imgs) >= vis_max_samples and len(vis_anomaly_imgs) >= vis_max_samples:
                            break

                # Optionally save test images (real, fake and side-by-side).
                if self.opt.save_test_images:
                    dst = os.path.join(self.opt.outf, self.opt.name, 'test', 'images')
                    os.makedirs(dst, exist_ok=True)
                    real, fake, _ = self.get_current_images()
                    img_format = getattr(self.opt, 'test_image_format', 'png')
                    vutils.save_image(
                        real,
                        '%s/real_%03d.%s' % (dst, i + 1, img_format),
                        normalize=True,
                        nrow=int(real.size(0) ** 0.5)  # roughly square grid
                    )
                    vutils.save_image(
                        fake,
                        '%s/fake_%03d.%s' % (dst, i + 1, img_format),
                        normalize=True,
                        nrow=int(fake.size(0) ** 0.5)
                    )

                    # Comparison grid: reals on the first row, fakes below.
                    comparison = torch.cat([real, fake], dim=0)
                    vutils.save_image(
                        comparison,
                        '%s/comparison_%03d.%s' % (dst, i + 1, img_format),
                        normalize=True,
                        nrow=real.size(0)  # batch_size images per row
                    )

                self.total_steps += batch_size

            # Mean inference time over (at most) the first 100 batches, in ms.
            self.times = np.array(self.times)
            self.times = np.mean(self.times[:min(100, len(self.times))] * 1000)

            # Global min-max normalisation of the fused anomaly score.
            min_score = torch.min(self.an_scores)
            max_score = torch.max(self.an_scores)
            if max_score > min_score:
                self.an_scores = (self.an_scores - min_score) / (max_score - min_score)
            else:
                self.an_scores = torch.zeros_like(self.an_scores)

            # Main evaluation metric on the fused score.
            auc = evaluate(self.gt_labels, self.an_scores, metric=self.opt.metric)

            # Per-component AUCs for diagnostics.
            recon_auc = evaluate(self.gt_labels, self.recon_errors, metric=self.opt.metric)
            latent_auc = evaluate(self.gt_labels, self.latent_errors, metric=self.opt.metric)
            feature_auc = evaluate(self.gt_labels, self.feature_errors, metric=self.opt.metric)

            performance = OrderedDict([
                ('Avg Run Time (ms/batch)', self.times),
                (self.opt.metric.lower(), auc),
                ('recon_auc', recon_auc),
                ('latent_auc', latent_auc),
                ('feature_auc', feature_auc)
            ])

            # Plot performance metrics.
            if self.opt.display_id > 0 and self.opt.phase == 'test':
                counter_ratio = float(self.total_steps) / dataset_size
                self.visualizer.plot_performance(self.epoch, counter_ratio, performance)

            # Persist a detailed per-epoch analysis report.
            vis_dir = os.path.join(self.opt.outf, self.opt.name, 'test', 'interpretability', f'epoch_{self.epoch + 1}')
            os.makedirs(vis_dir, exist_ok=True)

            with open(os.path.join(vis_dir, "detailed_metrics.txt"), "w") as f:
                f.write(f"Final AUC: {auc:.4f}\n")
                f.write(f"Reconstruction AUC: {recon_auc:.4f}\n")
                f.write(f"Latent AUC: {latent_auc:.4f}\n")
                f.write(f"Feature AUC: {feature_auc:.4f}\n\n")

                # Score statistics per class.
                normal_mask = self.gt_labels == 0
                anomaly_mask = self.gt_labels == 1

                if len(self.an_scores[normal_mask]) > 0:
                    f.write(f"Normal samples - Score stats:\n")
                    f.write(f"  Mean: {self.an_scores[normal_mask].mean().item():.4f}\n")
                    f.write(f"  Std: {self.an_scores[normal_mask].std().item():.4f}\n")
                    f.write(f"  Max: {self.an_scores[normal_mask].max().item():.4f}\n")

                if len(self.an_scores[anomaly_mask]) > 0:
                    f.write(f"\nAnomaly samples - Score stats:\n")
                    f.write(f"  Mean: {self.an_scores[anomaly_mask].mean().item():.4f}\n")
                    f.write(f"  Std: {self.an_scores[anomaly_mask].std().item():.4f}\n")
                    f.write(f"  Min: {self.an_scores[anomaly_mask].min().item():.4f}\n")

                # Optimal decision threshold (Youden's J on the ROC curve).
                all_scores = self.an_scores.cpu().numpy()
                all_labels = self.gt_labels.cpu().numpy()

                from sklearn.metrics import roc_curve
                fpr, tpr, thresholds = roc_curve(all_labels, all_scores)
                optimal_idx = np.argmax(tpr - fpr)
                optimal_threshold = thresholds[optimal_idx]

                f.write(f"\nOptimal threshold: {optimal_threshold:.4f}\n")

                # Classification quality at the optimal threshold.
                predictions = (all_scores >= optimal_threshold).astype(int)
                from sklearn.metrics import classification_report
                report = classification_report(all_labels, predictions, target_names=['Normal', 'Anomaly'],
                                               output_dict=True)
                f.write(f"\nClassification Report at optimal threshold:\n")
                f.write(
                    f"Normal - Precision: {report['Normal']['precision']:.4f}, Recall: {report['Normal']['recall']:.4f}\n")
                f.write(
                    f"Anomaly - Precision: {report['Anomaly']['precision']:.4f}, Recall: {report['Anomaly']['recall']:.4f}\n")
                f.write(f"Overall - F1: {report['weighted avg']['f1-score']:.4f}, Accuracy: {report['accuracy']:.4f}\n")

            # Enhanced visualisations.
            if self.opt.save_test_images:
                from lib.interpretability import (
                    plot_reconstruction_comparison,
                    plot_latent_distribution,
                    plot_anomaly_score_distribution,
                    plot_roc_curve,
                    plot_score_distribution_by_component
                )

                # Score distribution of each component.
                plot_score_distribution_by_component(
                    scores_dict={
                        'Total': self.an_scores,
                        'Reconstruction': self.recon_errors,
                        'Latent': self.latent_errors,
                        'Feature': self.feature_errors
                    },
                    labels=self.gt_labels,
                    save_path=os.path.join(vis_dir, "score_distribution_by_component.png")
                )

                # ROC curve.
                plot_roc_curve(
                    labels=self.gt_labels,
                    scores=self.an_scores,
                    save_path=os.path.join(vis_dir, "roc_curve.png")
                )

                # Real-vs-reconstruction grids for collected samples.
                if len(vis_normal_imgs) > 0:
                    normal_imgs_tensor = torch.stack(vis_normal_imgs)
                    normal_fakes_tensor = torch.stack(vis_normal_fakes)
                    plot_reconstruction_comparison(
                        real=normal_imgs_tensor,
                        fake=normal_fakes_tensor,
                        save_path=os.path.join(vis_dir, "normal_reconstructions.png"),
                        n=len(vis_normal_imgs)
                    )

                if len(vis_anomaly_imgs) > 0:
                    anomaly_imgs_tensor = torch.stack(vis_anomaly_imgs)
                    anomaly_fakes_tensor = torch.stack(vis_anomaly_fakes)
                    plot_reconstruction_comparison(
                        real=anomaly_imgs_tensor,
                        fake=anomaly_fakes_tensor,
                        save_path=os.path.join(vis_dir, "anomaly_reconstructions.png"),
                        n=len(vis_anomaly_imgs)
                    )

                # Latent-space visualisation; subsample large datasets for t-SNE.
                tsne_sample_size = min(1000, dataset_size)
                if dataset_size > tsne_sample_size:
                    sample_idx = torch.randperm(dataset_size)[:tsne_sample_size]
                    latent_i_sample = self.latent_i[sample_idx]
                    latent_o_sample = self.latent_o[sample_idx]
                    labels_sample = self.gt_labels[sample_idx]
                else:
                    latent_i_sample = self.latent_i
                    latent_o_sample = self.latent_o
                    labels_sample = self.gt_labels

                plot_latent_distribution(
                    latent_i=latent_i_sample,
                    latent_o=latent_o_sample,
                    labels=labels_sample,
                    save_path=os.path.join(vis_dir, "latent_space_tsne.png")
                )

                plot_anomaly_score_distribution(
                    scores=self.an_scores,
                    labels=self.gt_labels,
                    save_path=os.path.join(vis_dir, "anomaly_score_distribution.png")
                )

            return performance


##
class Ganomaly(BaseModel):
    """GANomaly Class
    """

    @property
    def name(self):
        """Model identifier used for output directories and logging."""
        return 'Ganomaly'

    def __init__(self, opt, dataloader):
        """Build the GANomaly networks, losses, input buffers and optimizers.

        Args:
            opt: parsed option namespace (image size, batch size, learning
                rates, loss weights and feature flags such as ``use_semi``).
            dataloader: train/test dataloaders, forwarded to ``BaseModel``.
        """
        super().__init__(opt, dataloader)

        # -- Misc attributes
        self.epoch = 0
        self.times = []
        self.total_steps = 0

        ##
        # Create and initialize networks.
        self.netg = NetG(self.opt).to(self.device)
        self.netd = NetD(self.opt, use_semi=self.opt.use_semi).to(self.device)
        self.netg.apply(weights_init)
        self.netd.apply(weights_init)

        # Bookkeeping for discriminator re-initialization (see reinit_d).
        self.last_reinit_epoch = -10  # start well below any real epoch index
        self.reinit_count = 0         # how many times netD has been reset

        ##
        # Optionally resume from checkpoints; load each file only once
        # (the previous code read netG.pth from disk twice).
        if self.opt.resume != '':
            print("\nLoading pre-trained networks.")
            netg_ckpt = torch.load(os.path.join(self.opt.resume, 'netG.pth'))
            netd_ckpt = torch.load(os.path.join(self.opt.resume, 'netD.pth'))
            self.opt.iter = netg_ckpt['epoch']
            self.netg.load_state_dict(netg_ckpt['state_dict'])
            self.netd.load_state_dict(netd_ckpt['state_dict'])
            print("\tDone.\n")

        # Loss functions: adversarial (feature matching), reconstruction,
        # latent encoding, discriminator BCE, optional semi-supervised head.
        self.l_adv = l2_loss
        self.l_con = nn.L1Loss()
        self.l_enc = l2_loss
        self.l_bce = nn.BCELoss()
        self.l_cls = nn.BCELoss() if self.opt.use_semi else None

        ##
        # Initialize input tensors (pre-allocated, filled by set_input).
        self.input = torch.empty(size=(self.opt.batchsize, 3, self.opt.isize, self.opt.isize), dtype=torch.float32,
                                 device=self.device)
        self.label = torch.empty(size=(self.opt.batchsize,), dtype=torch.float32, device=self.device)
        self.gt = torch.empty(size=(self.opt.batchsize,), dtype=torch.long, device=self.device)
        self.fixed_input = torch.empty(size=(self.opt.batchsize, 3, self.opt.isize, self.opt.isize),
                                       dtype=torch.float32, device=self.device)
        self.real_label = torch.ones(size=(self.opt.batchsize,), dtype=torch.float32, device=self.device)
        self.fake_label = torch.zeros(size=(self.opt.batchsize,), dtype=torch.float32, device=self.device)

        ##
        # Setup optimizers and cosine LR schedules (training mode only).
        if self.opt.isTrain:
            self.netg.train()
            self.netd.train()
            self.optimizer_d = optim.Adam(
                self.netd.parameters(),
                lr=self.opt.lr * 0.7,  # slightly lower LR for the discriminator
                betas=(self.opt.beta1, 0.999),
                weight_decay=1e-5,  # mild weight decay against overfitting
            )
            self.optimizer_g = optim.Adam(
                self.netg.parameters(),
                lr=self.opt.lr,
                betas=(self.opt.beta1, 0.999),
                weight_decay=1e-5,  # mild weight decay against overfitting
            )
            # Cosine annealing down to a small floor learning rate.
            self.scheduler_g = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer_g,
                T_max=self.opt.niter,
                eta_min=self.opt.lr * 0.01  # minimum LR for the generator
            )
            self.scheduler_d = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer_d,
                T_max=self.opt.niter,
                eta_min=self.opt.lr * 0.007  # minimum LR for the discriminator
            )

    ##
    def forward_g(self):
        """Run the generator on the current batch.

        Stores the reconstruction and both latent codes (from the first
        and second encoders) as attributes for the loss computation.
        """
        reconstruction, latent_in, latent_out = self.netg(self.input)
        self.fake = reconstruction
        self.latent_i = latent_in
        self.latent_o = latent_out

    ##
    def forward_d(self):
        """Run the discriminator on real and (detached) generated images.

        Each call returns a (prediction, feature, classification) triple;
        the fake batch is detached so generator gradients do not flow here.
        """
        real_out = self.netd(self.input)
        fake_out = self.netd(self.fake.detach())
        self.pred_real, self.feat_real, self.cls_real = real_out
        self.pred_fake, self.feat_fake, self.cls_fake = fake_out

    ##
    def backward_g(self):
        """Compute the generator loss and backpropagate it.

        The total loss ``err_g`` is a weighted sum of:
          * ``err_g_adv`` — adversarial feature-matching loss between the
            discriminator features of real and reconstructed images,
          * ``err_g_con`` — reconstruction loss, optionally split into
            low- and high-frequency bands,
          * ``err_g_enc`` — latent consistency loss between the two
            encoders,
        plus optional semi-supervised terms (latent L2 regularization,
        push-away hinge, contrastive InfoNCE) gated by ``opt`` flags.
        Generator gradients are clipped to max-norm 0.5 after backward().
        """
        # Feature-matching adversarial loss (index 1 of netd's output is
        # the intermediate feature map).
        self.err_g_adv = self.l_adv(self.netd(self.input)[1], self.netd(self.fake)[1])
        # If high/low-frequency separation is available in encoder1,
        # compute the reconstruction loss per frequency band.
        if 'freq-sep' in self.netg.encoder1.main._modules:
            freq_sep_layer = self.netg.encoder1.main._modules['freq-sep']
            input_low, input_high = freq_sep_layer.down_sep(self.input)
            fake_low, fake_high = freq_sep_layer.down_sep(self.fake)

            # Weight the high-frequency band more heavily (x3) — fine
            # detail matters most for reconstruction quality.
            self.err_g_con_low = self.l_con(fake_low, input_low)
            self.err_g_con_high = self.l_con(fake_high, input_high) * 3.0
            self.err_g_con = self.err_g_con_low + self.err_g_con_high
        else:
            # Plain full-band reconstruction loss.
            self.err_g_con = self.l_con(self.fake, self.input)
        self.err_g_enc = self.l_enc(self.latent_o, self.latent_i)
        # Defaults so the optional terms always exist for logging/summing.
        self.err_latent_reg = 0.0
        self.err_push = 0.0
        self.err_contrast = 0.0


        # Latent L2 regularization: pull latent vectors toward their batch
        # mean so normal samples form a compact cluster.
        if self.opt.use_semi and self.opt.use_latent_reg:
            normal_mask = self.gt == 0
            if normal_mask.any():
                normal_latent = self.latent_i[normal_mask]
                latent_mean = normal_latent.mean(dim=0, keepdim=True)
                self.err_latent_reg = torch.mean(torch.pow(normal_latent - latent_mean, 2)) * 0.5
            else:
                self.err_latent_reg = 0.0
        else:
            # NOTE(review): this branch runs whenever the condition above is
            # false — i.e. even when use_latent_reg is disabled — though the
            # result is only added to err_g when opt.use_latent_reg is set
            # (see below). Confirm the unconditional computation is intended.
            latent_mean = self.latent_i.mean(dim=0, keepdim=True)
            self.err_latent_reg = torch.mean(torch.pow(self.latent_i - latent_mean, 2)) * 0.5

        # Push loss: drive labelled anomalies away from the centroid of
        # the normal samples in latent space.
        if self.opt.use_semi and self.opt.use_push:

            normal_mask = (self.gt == 0)
            anomaly_mask = (self.gt == 1)

            if torch.sum(normal_mask) > 0 and torch.sum(anomaly_mask) > 0:
                # Centroid of the normal samples.
                normal_center = torch.mean(self.latent_i[normal_mask], dim=0, keepdim=True)
                # Mean distance of anomaly latents to that centroid.
                anomaly_dist = torch.mean(torch.norm(self.latent_i[anomaly_mask] - normal_center, dim=1))
                # Hinge: penalize anomalies closer than a margin of 5.0;
                # margin and the 10.0 weight are empirical — tune per dataset.
                self.err_push = torch.max(torch.tensor(0.0, device=self.device), 5.0 - anomaly_dist) * 10.0

        # Contrastive (InfoNCE) loss to tighten the normal-sample cluster.
        if self.opt.use_semi and self.opt.use_contrast:

            # Labels for the current batch (0 = normal, 1 = anomaly).
            batch_labels = self.gt

            # Need more than two normal samples to form positive pairs.
            normal_count = torch.sum(batch_labels == 0)
            if normal_count > 2:
                self.err_contrast = semi_supervised_info_nce_loss(
                    features=self.latent_i.squeeze(),  # squeeze to [B, D] features
                    labels=batch_labels,
                    normal_class=0,
                    temperature=self.opt.contrast_temp,
                )


        # Weighted sum of the three core GANomaly losses.
        self.err_g = self.err_g_adv * self.opt.w_adv + \
                     self.err_g_con * self.opt.w_con + \
                     self.err_g_enc * self.opt.w_enc

        # Optional regularizers are added only when their flag is enabled.
        if self.opt.use_contrast:
            self.err_g += self.err_contrast * self.opt.w_contrast
        if self.opt.use_push:
            self.err_g += self.err_push
        if self.opt.use_latent_reg:
            self.err_g += self.err_latent_reg * self.opt.w_latent_reg
        # Backprop, then clip generator gradients for training stability.
        self.err_g.backward()
        torch.nn.utils.clip_grad_norm_(self.netg.parameters(), max_norm=0.5)

    ##
    def backward_d(self):
        """Compute the discriminator loss and backpropagate it.

        Combines the real/fake adversarial BCE with an optional
        semi-supervised classification BCE (real samples only), then
        clips discriminator gradients to max-norm 1.0.
        """
        # Real - Fake adversarial loss.
        self.err_d_real = self.l_bce(self.pred_real, self.real_label)
        self.err_d_fake = self.l_bce(self.pred_fake, self.fake_label)
        err_adv = (self.err_d_real + self.err_d_fake) * 0.5

        self.err_cls = 0.0  # default when the auxiliary classifier is unused
        if self.opt.use_semi and self.netd.anomaly_classifier is not None:
            # BCELoss needs float targets; self.gt holds long 0/1 labels.
            # (The former unsqueeze(1)/squeeze() round trip was a no-op for
            # batches > 1 but collapsed a batch of size 1 to a 0-d tensor;
            # a plain cast keeps the [B] shape in all cases.)
            cls_labels = self.gt.float()
            self.err_cls = self.l_cls(self.cls_real, cls_labels)  # classifier trained on real samples only

        # Total discriminator loss: weighted adversarial + classification.
        self.err_d = err_adv * self.opt.w_adv + self.err_cls * self.opt.w_cls
        self.err_d.backward()
        torch.nn.utils.clip_grad_norm_(self.netd.parameters(), max_norm=1.0)

    ##
    def reinit_d(self):
        """Reset the discriminator to freshly initialized weights.

        Used to recover when the discriminator collapses during training;
        the weight reset happens first, then the action is logged.
        """
        self.netd.apply(weights_init)
        print('   Reloading net d')

    def optimize_params(self):
        """One training step: forward both networks, then update G and D.

        The generator runs first because the discriminator consumes
        ``self.fake``; each network is then stepped with its own optimizer
        (generator before discriminator, matching GANomaly's scheme).
        """
        self.forward_g()
        self.forward_d()

        # zero-grad -> backward -> step, generator first, then discriminator.
        update_order = (
            (self.optimizer_g, self.backward_g),
            (self.optimizer_d, self.backward_d),
        )
        for optimizer, backward in update_order:
            optimizer.zero_grad()
            backward()
            optimizer.step()