"""PatchCore and PatchCore detection methods."""
import logging
import os
import pickle

import numpy as np
import torch
import torch.nn.functional as F
import tqdm

import patchcore
import patchcore.backbones
import patchcore.common
import patchcore.sampler

LOGGER = logging.getLogger(__name__)


class PatchCore(torch.nn.Module):
    """PatchCore anomaly detector with per-layer learnable projection heads.

    Pipeline: a frozen backbone extracts multi-layer features, each layer's
    patches are projected by a layer-specific MLP head, the projected patch
    features fill a (coreset-sampled) memory bank, and anomaly scores are
    nearest-neighbour distances to that bank.
    """

    def __init__(self, device):
        """PatchCore anomaly detection class."""
        super().__init__()
        self.device = device
        # Created in load(): one projection head per extracted feature layer.
        self.projection_heads = None

    def load(
            self,
            backbone,
            layers_to_extract_from,
            device,
            input_shape,
            pretrain_embed_dimension,
            target_embed_dimension,
            patchsize=3,
            patchstride=1,
            anomaly_score_num_nn=1,
            featuresampler=None,
            nn_method=None,
            **kwargs,
    ):
        """Instantiate all sub-modules.

        Args:
            backbone: Feature-extraction network (frozen during training).
            layers_to_extract_from: Layer names whose features are patchified.
            device: torch device everything is moved to.
            input_shape: (C, H, W) of the input images.
            pretrain_embed_dimension: Target dim of the preprocessing module.
            target_embed_dimension: Final per-patch embedding dimension.
            patchsize / patchstride: Patch extraction kernel and stride.
            anomaly_score_num_nn: k for the nearest-neighbour scorer.
            featuresampler: Coreset sampler; defaults to IdentitySampler.
            nn_method: Nearest-neighbour backend; defaults to FaissNN(False, 4).
        """
        # Lazily create defaults instead of sharing stateful instances that
        # were previously built once at function-definition time.
        if featuresampler is None:
            featuresampler = patchcore.sampler.IdentitySampler()
        if nn_method is None:
            nn_method = patchcore.common.FaissNN(False, 4)

        self.backbone = backbone.to(device)
        self.layers_to_extract_from = layers_to_extract_from
        self.input_shape = input_shape
        self.device = device
        self.patch_maker = PatchMaker(patchsize, stride=patchstride)

        # Feature blocks.
        self.forward_modules = torch.nn.ModuleDict()
        agg = patchcore.common.NetworkFeatureAggregator(
            self.backbone, layers_to_extract_from, device
        )
        self.forward_modules["feature_aggregator"] = agg
        # feature_dimensions() runs a dummy forward pass — call it once and
        # reuse the result (it was previously invoked three times).
        feature_dimensions = agg.feature_dimensions(input_shape)
        LOGGER.debug("Feature dimensions (%s): %s", type(feature_dimensions), feature_dimensions)
        LOGGER.debug("Layers to extract from: %s", self.layers_to_extract_from)

        pre = patchcore.common.Preprocessing(feature_dimensions, pretrain_embed_dimension)
        self.forward_modules["preprocessing"] = pre
        padapt = patchcore.common.Aggregator(target_dim=target_embed_dimension).to(device)
        self.forward_modules["preadapt_aggregator"] = padapt

        # One projection head per extracted layer, sized to that layer's
        # flattened patch dimension.
        self.projection_heads = torch.nn.ModuleList()
        for layer_idx, layer_name in enumerate(self.layers_to_extract_from):
            # feature_dimensions may be a dict keyed by layer name or a list
            # ordered like layers_to_extract_from — support both.
            if isinstance(feature_dimensions, dict):
                layer_dim = feature_dimensions[layer_name]
            else:
                layer_dim = feature_dimensions[layer_idx]

            patch_dim = layer_dim * (patchsize ** 2)  # flattened patch size

            proj_head = torch.nn.Sequential(
                torch.nn.Linear(patch_dim, target_embed_dimension),
                torch.nn.BatchNorm1d(target_embed_dimension),
                torch.nn.ReLU(),
                torch.nn.Dropout(0.2),
                torch.nn.Linear(target_embed_dimension, target_embed_dimension),
                torch.nn.ReLU(),
                torch.nn.Linear(target_embed_dimension, target_embed_dimension),
            ).to(device)

            self.projection_heads.append(proj_head)

        # Utility blocks.
        self.anomaly_scorer = patchcore.common.NearestNeighbourScorer(
            n_nearest_neighbours=anomaly_score_num_nn, nn_method=nn_method
        )
        self.anomaly_segmentor = patchcore.common.RescaleSegmentor(device=device, target_size=input_shape[-2:])
        self.featuresampler = featuresampler

    def embed(self, data):
        """Embed a single batch or every batch of a DataLoader."""
        if isinstance(data, torch.utils.data.DataLoader):
            features = []
            for image in data:
                if isinstance(image, dict):
                    image = image["image"]
                with torch.no_grad():
                    input_image = image.to(torch.float).to(self.device)
                    features.append(self._embed(input_image))
            return features
        return self._embed(data)

    def _embed(self, images, detach=True, provide_patch_shapes=False):
        """Embed images into per-patch feature vectors.

        Returns a (total_patches, D) numpy array (detach=True) or tensor
        (detach=False); optionally also the per-layer [h, w] patch grids.
        """

        def _maybe_det(x):
            if not detach:
                return x
            if isinstance(x, torch.Tensor):
                # Convert the whole tensor at once; the previous per-row
                # iteration produced a list of 1-D arrays.
                return x.detach().cpu().numpy()
            return [t.detach().cpu().numpy() for t in x]

        self.forward_modules["feature_aggregator"].eval()
        with torch.no_grad():
            # The backbone stays frozen; only the projection heads (below)
            # are trainable.
            feats = self.forward_modules["feature_aggregator"](images)
        feats = [feats[layer] for layer in self.layers_to_extract_from]
        feats = [self.patch_maker.patchify(f, True) for f in feats]
        patch_shapes = [p[1] for p in feats]
        feats = [p[0] for p in feats]

        processed_feats = []
        for i, f in enumerate(feats):
            # f: (B, num_patches, C, ps, ps). patchify already emits patches
            # in row-major spatial order, so flattening directly keeps one
            # row per patch.  (A previous variant permuted to
            # (B, C, ps, ps, h, w) before reshaping, which scrambled the
            # patch/channel layout; a second, dead loop was also removed.)
            f = f.reshape(f.shape[0] * f.shape[1], -1)
            processed_feats.append(self.projection_heads[i](f))

        # Layers may produce different patch counts; concatenate along the
        # patch axis instead of truncating every layer to the smallest grid.
        feats = torch.cat(processed_feats, dim=0)
        # NOTE(review): features are already projected to
        # target_embed_dimension by the per-layer heads; they are then passed
        # through preprocessing/preadapt_aggregator again — confirm these
        # modules accept a 2-D tensor in this fork of patchcore.common.
        feats = self.forward_modules["preprocessing"](feats)
        feats = self.forward_modules["preadapt_aggregator"](feats)
        # Defensive: some aggregator variants return a list of tensors.
        if isinstance(feats, list):
            feats = torch.cat(feats, dim=0)
        return (_maybe_det(feats), patch_shapes) if provide_patch_shapes else _maybe_det(feats)

    def _train_memory_bank(self, training_data, epochs=5, lr=1e-3):
        """Jointly refine the projection heads and the memory bank by
        minimising each patch's distance to its nearest memory entry."""
        if not hasattr(self.anomaly_scorer, "memory_features"):
            LOGGER.warning("Memory features not found; skipping optimization.")
            return

        memory_bank = torch.nn.Parameter(
            torch.tensor(self.anomaly_scorer.memory_features[0], device=self.device)
        )
        optimizer = torch.optim.Adam(
            list(self.projection_heads.parameters()) + [memory_bank], lr=lr
        )

        for epoch in range(epochs):
            losses = []
            for batch in tqdm.tqdm(training_data, desc=f"Memory Bank Training Epoch {epoch + 1}", leave=False):
                images = batch["image"] if isinstance(batch, dict) else batch
                input_image = images.to(torch.float).to(self.device)
                # No torch.no_grad() here: gradients must reach the projection
                # heads being optimized (the frozen backbone is already
                # guarded inside _embed). Previously the whole embedding was
                # wrapped in no_grad, so the heads never received gradients.
                features = self._embed(input_image, detach=False)
                if isinstance(features, list):
                    features = torch.cat(features, dim=0)

                # Compute the distance matrix once (was computed three times).
                loss = torch.cdist(features, memory_bank).min(dim=1).values.mean()

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses.append(loss.item())

            LOGGER.info("Epoch %d, Mean anomaly loss: %.4f", epoch + 1, np.mean(losses))

        # Write the optimized bank back for the scorer.
        self.anomaly_scorer.memory_features = [memory_bank.detach().cpu().numpy()]

    def fit(self, training_loader):
        """Build the memory bank, then jointly optimise the projection heads
        and the bank against the training data."""
        # Backbone/feature modules stay in eval mode; only the projection
        # heads train.
        self.forward_modules.eval()
        self.projection_heads.train()

        # ---- build memory bank (stream) ----
        patches = []
        for batch in tqdm.tqdm(training_loader, desc="Embed train set", leave=False):
            imgs = batch["image"] if isinstance(batch, dict) else batch
            imgs = imgs.to(torch.float).to(self.device)
            with torch.no_grad():
                feats = self._embed(imgs, detach=True)
            # Defensive: flatten a list of arrays to a single (N, D) array.
            if isinstance(feats, list):
                feats = np.concatenate([f.reshape(-1, f.shape[-1]) for f in feats], axis=0)
            patches.append(feats)

        memory_features = np.concatenate(patches, axis=0)
        LOGGER.debug("Memory features shape before sampling: %s", memory_features.shape)

        # Coreset subsampling.
        memory_features = self.featuresampler.run(memory_features)
        LOGGER.debug("Memory features shape after sampling: %s", memory_features.shape)

        # Assigning an nn.Parameter on the module registers it, so the bank
        # is part of state_dict() and survives checkpointing.
        self.memory_bank = torch.nn.Parameter(torch.tensor(memory_features, device=self.device))
        self.anomaly_scorer.memory_features = [self.memory_bank.detach().cpu().numpy()]

        opt = torch.optim.Adam(list(self.projection_heads.parameters()) + [self.memory_bank], lr=1e-3)

        # ---- joint optimisation ----
        for ep in range(10):
            ep_loss = []
            for batch in tqdm.tqdm(training_loader, desc=f"Joint‑epoch {ep+1}", leave=False):
                imgs = batch["image"] if isinstance(batch, dict) else batch
                imgs = imgs.to(torch.float).to(self.device)
                # Gradients must flow into the projection heads, so the
                # embedding is NOT wrapped in torch.no_grad() (the backbone
                # is already frozen inside _embed).
                embeds = self._embed(imgs, detach=False)
                if isinstance(embeds, list):
                    embeds = torch.cat([e.reshape(-1, e.shape[-1]) for e in embeds], dim=0)
                loss = torch.cdist(embeds, self.memory_bank).min(dim=1).values.mean()
                opt.zero_grad()
                loss.backward()
                opt.step()
                ep_loss.append(loss.item())
            LOGGER.info("Joint epoch %d  loss=%.4f", ep + 1, np.mean(ep_loss))

        # Rebuild the nearest-neighbour index after optimisation.
        self.anomaly_scorer.memory_features = [self.memory_bank.detach().cpu().numpy()]
        self.anomaly_scorer.fit(detection_features=self.anomaly_scorer.memory_features)

    def _fill_memory_bank(self, input_data):
        """Computes and sets the support features for SPADE."""
        _ = self.forward_modules.eval()

        def _image_to_features(input_image):
            with torch.no_grad():
                input_image = input_image.to(torch.float).to(self.device)
                return self._embed(input_image)

        features = []
        with tqdm.tqdm(
            input_data, desc="Computing support features...", position=1, leave=False
        ) as data_iterator:
            for image in data_iterator:
                if isinstance(image, dict):
                    image = image["image"]
                features.append(_image_to_features(image))

        features = np.concatenate(features, axis=0)
        features = self.featuresampler.run(features)

        self.anomaly_scorer.fit(detection_features=[features])

    def predict(self, data):
        """Predict scores/masks for a batch or a whole DataLoader."""
        if isinstance(data, torch.utils.data.DataLoader):
            return self._predict_dataloader(data)
        return self._predict(data)

    def _predict_dataloader(self, dataloader):
        """Run inference over a DataLoader; also collects ground truth when
        batches are dicts with 'is_anomaly'/'mask' entries."""
        _ = self.forward_modules.eval()
        scores = []
        masks = []
        labels_gt = []
        masks_gt = []
        with tqdm.tqdm(dataloader, desc="Inferring...", leave=False) as data_iterator:
            for batch in data_iterator:
                if isinstance(batch, dict):
                    labels_gt.extend(batch["is_anomaly"].numpy().tolist())
                    masks_gt.extend(batch["mask"].numpy().tolist())
                    images = batch["image"]
                    fg_mask = batch.get("foreground_mask", None)
                else:
                    images = batch
                    fg_mask = None
                _scores, _masks = self._predict(images, fg_mask=fg_mask)
                scores.extend(_scores)
                masks.extend(_masks)
        # Fail loudly if inference produced no segmentations at all.
        if not masks:
            raise ValueError("未生成任何异常分割。请检查模型输出。")
        return scores, masks, labels_gt, masks_gt

    def _predict(self, images, fg_mask=None):
        """Score one batch; returns (image_scores, segmentation_masks)."""
        images = images.to(torch.float).to(self.device)
        _ = self.forward_modules.eval()
        batchsize = images.shape[0]
        with torch.no_grad():
            features, patch_shapes = self._embed(images, provide_patch_shapes=True)

            # Defensive: flatten a list of arrays to a single (N, D) array.
            if isinstance(features, list):
                features = np.concatenate([f.reshape(-1, f.shape[-1]) for f in features], axis=0)

            patch_scores = self.anomaly_scorer.predict([features])[0]

            # Use the first layer's grid as the reference spatial resolution.
            h, w = patch_shapes[0]
            patches_per_image = h * w
            expected = batchsize * patches_per_image
            LOGGER.debug(
                "patch scores: %d, expected: %d (%dx%d per image, batch %d)",
                len(patch_scores), expected, h, w, batchsize,
            )

            # Defensive: tile or truncate when the scorer returns a different
            # number of patch scores than the reference grid implies (layers
            # with differing patch counts are concatenated in _embed).
            if len(patch_scores) < expected:
                repetitions = (expected + len(patch_scores) - 1) // len(patch_scores)
                patch_scores = np.tile(patch_scores, repetitions)[:expected]
            elif len(patch_scores) > expected:
                patch_scores = patch_scores[:expected]

            # Image-level score = max patch anomaly score per image.
            patch_scores_reshaped = patch_scores.reshape(batchsize, patches_per_image)
            image_scores = np.max(patch_scores_reshaped, axis=1)

            # 2-D score maps for segmentation.
            patch_scores_2d = patch_scores_reshaped.reshape(batchsize, h, w)

            # Suppress anomaly scores outside the foreground when a mask is given.
            if fg_mask is not None:
                fg_mask_resized = F.interpolate(fg_mask, size=(h, w), mode="nearest")
                fg_mask_resized = fg_mask_resized.squeeze(1).cpu().numpy()
                patch_scores_2d = patch_scores_2d * fg_mask_resized

            masks = self.anomaly_segmentor.convert_to_segmentation(patch_scores_2d)

        return list(image_scores), list(masks)

    @staticmethod
    def _params_file(filepath, prepend=""):
        """Path of the pickled hyper-parameter file inside *filepath*."""
        return os.path.join(filepath, prepend + "patchcore_params.pkl")

    def save_to_path(self, save_path: str, prepend: str = "") -> None:
        """Persist scorer features and the hyper-parameters needed by load().

        NOTE(review): the projection heads' weights are not saved here —
        persist the module state_dict separately if they must be restored.
        """
        LOGGER.info("Saving PatchCore data.")
        self.anomaly_scorer.save(
            save_path, save_features_separately=False, prepend=prepend
        )
        patchcore_params = {
            "backbone.name": self.backbone.name,
            "layers_to_extract_from": self.layers_to_extract_from,
            "input_shape": self.input_shape,
            "pretrain_embed_dimension": self.forward_modules[
                "preprocessing"
            ].output_dim,
            "target_embed_dimension": self.forward_modules[
                "preadapt_aggregator"
            ].target_dim,
            "patchsize": self.patch_maker.patchsize,
            "patchstride": self.patch_maker.stride,
            "anomaly_scorer_num_nn": self.anomaly_scorer.n_nearest_neighbours,
        }
        with open(self._params_file(save_path, prepend), "wb") as save_file:
            pickle.dump(patchcore_params, save_file, pickle.HIGHEST_PROTOCOL)

    def load_from_path(
        self,
        load_path: str,
        device: torch.device,
        nn_method=None,
        prepend: str = "",
    ) -> None:
        """Re-initialize the model from files written by save_to_path().

        The original signature used ``nn_method: FaissNN(False, 4)`` — an
        instance as a type annotation with no default; it is now a proper
        default argument (backward-compatible for callers that passed one).
        Note: pickle.load is only safe on trusted checkpoint files.
        """
        if nn_method is None:
            nn_method = patchcore.common.FaissNN(False, 4)
        LOGGER.info("Loading and initializing PatchCore.")
        with open(self._params_file(load_path, prepend), "rb") as load_file:
            patchcore_params = pickle.load(load_file)
        patchcore_params["backbone"] = patchcore.backbones.load(
            patchcore_params["backbone.name"]
        )
        patchcore_params["backbone"].name = patchcore_params["backbone.name"]
        del patchcore_params["backbone.name"]
        self.load(**patchcore_params, device=device, nn_method=nn_method)

        self.anomaly_scorer.load(load_path, prepend)


# Image handling classes.
class PatchMaker:
    """Splits feature maps into overlapping square patches and reduces
    per-patch scores back to per-image scores."""

    def __init__(self, patchsize, stride=None):
        self.patchsize = patchsize
        # A stride of None previously crashed both torch.nn.Unfold and the
        # patch-count arithmetic in patchify(); default to a dense stride of 1.
        self.stride = 1 if stride is None else stride

    def patchify(self, features, return_spatial_info=False):
        """Convert a tensor into a tensor of respective patches.
        Args:
            features: [torch.Tensor, bs x c x h x w]
        Returns:
            [torch.Tensor, bs x num_patches x c x patchsize x patchsize],
            plus the [n_patches_h, n_patches_w] grid when
            return_spatial_info is True.
        """
        # "Same"-style padding so every spatial location yields a patch
        # when stride == 1.
        padding = int((self.patchsize - 1) / 2)
        unfolder = torch.nn.Unfold(
            kernel_size=self.patchsize, stride=self.stride, padding=padding, dilation=1
        )
        unfolded_features = unfolder(features)
        # Standard conv-arithmetic patch count per spatial dimension
        # (dilation is fixed at 1).
        number_of_total_patches = []
        for s in features.shape[-2:]:
            n_patches = (
                s + 2 * padding - 1 * (self.patchsize - 1) - 1
            ) / self.stride + 1
            number_of_total_patches.append(int(n_patches))
        unfolded_features = unfolded_features.reshape(
            *features.shape[:2], self.patchsize, self.patchsize, -1
        )
        # (bs, c, ps, ps, L) -> (bs, L, c, ps, ps): one row per spatial
        # patch, in row-major spatial order.
        unfolded_features = unfolded_features.permute(0, 4, 1, 2, 3)

        if return_spatial_info:
            return unfolded_features, number_of_total_patches
        return unfolded_features

    def unpatch_scores(self, x, batchsize):
        """Regroup a flat per-patch array into one row per image."""
        return x.reshape(batchsize, -1, *x.shape[1:])

    def score(self, x):
        """Collapse trailing dimensions by max to get one score per image;
        accepts and returns either numpy arrays or tensors."""
        was_numpy = False
        if isinstance(x, np.ndarray):
            was_numpy = True
            x = torch.from_numpy(x)
        while x.ndim > 1:
            x = torch.max(x, dim=-1).values
        if was_numpy:
            return x.numpy()
        return x
