import logging
import os
import pickle
import numpy as np
import torch
import torch.nn.functional as F
import tqdm

import patchcore
import patchcore.backbones
import patchcore.common
import patchcore.sampler
from patchcore.patchcore import PatchCore, PatchMaker

LOGGER = logging.getLogger(__name__)


class MaskedPatchCore(PatchCore):
    """PatchCore anomaly detector extended with optional depth maps and
    foreground masks.

    Foreground masks re-weight the backbone feature maps so the memory bank
    and the anomaly scores focus on the object rather than the background;
    depth maps are fused into the features as a small additive signal.
    """

    def __init__(self, device):
        """Initialize MaskedPatchCore on the given torch device."""
        super(MaskedPatchCore, self).__init__(device)

    def load(
        self,
        backbone,
        layers_to_extract_from,
        device,
        input_shape,
        pretrain_embed_dimension,
        target_embed_dimension,
        patchsize=3,
        patchstride=1,
        anomaly_score_num_nn=1,
        featuresampler=patchcore.sampler.IdentitySampler(),
        nn_method=patchcore.common.FaissNN(False, 4),
        mask_importance=0.8,  # weight given to foreground regions
        **kwargs,
    ):
        """Load model parameters and configure mask handling.

        Args:
            mask_importance: Foreground-importance coefficient in [0, 1].
                Foreground pixels keep full weight (1.0) while background
                pixels are scaled down to ``1 - mask_importance``, so the
                background is attenuated but never discarded entirely.

        All other arguments are forwarded unchanged to ``PatchCore.load``.

        NOTE(review): the ``featuresampler``/``nn_method`` defaults are
        instantiated once at definition time (mirroring the upstream
        PatchCore signature) and are therefore shared between calls that
        rely on the defaults.
        """
        super().load(
            backbone=backbone,
            layers_to_extract_from=layers_to_extract_from,
            device=device,
            input_shape=input_shape,
            pretrain_embed_dimension=pretrain_embed_dimension,
            target_embed_dimension=target_embed_dimension,
            patchsize=patchsize,
            patchstride=patchstride,
            anomaly_score_num_nn=anomaly_score_num_nn,
            featuresampler=featuresampler,
            nn_method=nn_method,
            **kwargs,
        )
        self.mask_importance = mask_importance

    def embed(self, data):
        """Extract feature embeddings, supporting depth maps and masks.

        Accepts either a DataLoader (yielding dicts with "image" and
        optional "foreground_mask"/"depth" keys, or plain image tensors)
        or a single image tensor.
        """
        if isinstance(data, torch.utils.data.DataLoader):
            features = []
            for batch in data:
                if isinstance(batch, dict):
                    image = batch["image"]
                    mask = batch.get("foreground_mask", None)
                    depth = batch.get("depth", None)
                else:
                    # BUGFIX: plain-tensor batches previously left `image`,
                    # `mask` and `depth` unbound, raising NameError below.
                    image = batch
                    mask = None
                    depth = None
                with torch.no_grad():
                    input_image = image.to(torch.float).to(self.device)
                    if mask is not None:
                        mask = mask.to(torch.float).to(self.device)
                    if depth is not None:
                        depth = depth.to(torch.float).to(self.device)
                    features.append(self._embed(input_image, mask=mask, depth=depth))
            return features
        else:
            return self._embed(data)

    def _embed(self, images, mask=None, depth=None, detach=True, provide_patch_shapes=False):
        """Compute patch feature embeddings, optionally mask- and depth-aware.

        Args:
            images: Input image batch tensor.
            mask: Foreground mask batch (optional); assumed 4D
                ``(B, 1, H, W)`` so it can be bilinearly resized — TODO confirm.
            depth: Depth map batch (optional); assumed 4D with a single
                channel so ``expand`` can tile it across feature channels —
                TODO confirm.
            detach: If True, return detached CPU numpy arrays.
            provide_patch_shapes: If True, also return the per-layer
                patch grid shapes.
        """
        def _detach(features):
            if detach:
                return [x.detach().cpu().numpy() for x in features]
            return features

        _ = self.forward_modules["feature_aggregator"].eval()
        with torch.no_grad():
            # Backbone feature extraction.
            features = self.forward_modules["feature_aggregator"](images)
            features = [features[layer] for layer in self.layers_to_extract_from]

            # Foreground-mask weighting of each feature map.
            if mask is not None:
                for i in range(len(features)):
                    # Resize the mask to the feature map's spatial size.
                    resized_mask = F.interpolate(
                        mask,
                        size=(features[i].shape[2], features[i].shape[3]),
                        mode='bilinear',
                        align_corners=False
                    )

                    # Fused weight: foreground -> 1.0, background ->
                    # (1 - mask_importance).  The model focuses on the
                    # foreground without completely ignoring the background.
                    mask_weight = resized_mask * self.mask_importance + (1 - self.mask_importance)

                    features[i] = features[i] * mask_weight

            # Optional depth fusion.
            if depth is not None:
                for i in range(len(features)):
                    # Resize the depth map to the feature map's spatial size.
                    resized_depth = F.interpolate(
                        depth,
                        size=(features[i].shape[2], features[i].shape[3]),
                        mode='bilinear',
                        align_corners=False
                    )

                    # Simple additive fusion: tile depth across channels and
                    # add with a small weight so depth never dominates the
                    # appearance features.  More elaborate fusion strategies
                    # could be substituted here.
                    depth_features = resized_depth.expand(-1, features[i].shape[1], -1, -1)
                    features[i] = features[i] + 0.1 * depth_features

            # Remaining processing mirrors the upstream PatchCore._embed.
            features = [
                self.patch_maker.patchify(x, return_spatial_info=True) for x in features
            ]
            patch_shapes = [x[1] for x in features]
            features = [x[0] for x in features]
            ref_num_patches = patch_shapes[0]

            # Align every deeper layer's patch grid to the first layer's grid.
            for i in range(1, len(features)):
                _features = features[i]
                patch_dims = patch_shapes[i]

                _features = _features.reshape(
                    _features.shape[0], patch_dims[0], patch_dims[1], *_features.shape[2:]
                )
                _features = _features.permute(0, -3, -2, -1, 1, 2)
                perm_base_shape = _features.shape
                _features = _features.reshape(-1, *_features.shape[-2:])
                _features = F.interpolate(
                    _features.unsqueeze(1),
                    size=(ref_num_patches[0], ref_num_patches[1]),
                    mode="bilinear",
                    align_corners=False,
                )
                _features = _features.squeeze(1)
                _features = _features.reshape(
                    *perm_base_shape[:-2], ref_num_patches[0], ref_num_patches[1]
                )
                _features = _features.permute(0, -2, -1, 1, 2, 3)
                _features = _features.reshape(len(_features), -1, *_features.shape[-3:])
                features[i] = _features

            features = [x.reshape(-1, *x.shape[-3:]) for x in features]

            # Feature preprocessing / aggregation.
            features = self.forward_modules["preprocessing"](features)
            features = self.forward_modules["preadapt_aggregator"](features)

            if provide_patch_shapes:
                return _detach(features), patch_shapes
            return _detach(features)

    def fit(self, training_data):
        """Train PatchCore (fill the memory bank), mask- and depth-aware."""
        self._fill_memory_bank(training_data)

    def _fill_memory_bank(self, input_data):
        """Compute and set support features, mask- and depth-aware."""
        _ = self.forward_modules.eval()

        def _batch_to_features(batch):
            # Embed one batch; dict batches may carry mask/depth channels.
            with torch.no_grad():
                if isinstance(batch, dict):
                    image = batch["image"]
                    mask = batch.get("foreground_mask", None)
                    depth = batch.get("depth", None)
                    image = image.to(torch.float).to(self.device)
                    if mask is not None:
                        mask = mask.to(torch.float).to(self.device)
                    if depth is not None:
                        depth = depth.to(torch.float).to(self.device)
                    return self._embed(image, mask=mask, depth=depth)
                else:
                    batch = batch.to(torch.float).to(self.device)
                    return self._embed(batch)

        features = []
        with tqdm.tqdm(
            input_data, desc="Computing support features...", position=1, leave=False
        ) as data_iterator:
            for batch in data_iterator:
                features.append(_batch_to_features(batch))

        features = np.concatenate(features, axis=0)
        features = self.featuresampler.run(features)

        self.anomaly_scorer.fit(detection_features=[features])

    def _predict(self, images, masks=None, depths=None):
        """Infer anomaly scores and segmentation maps, honoring depth and
        foreground-mask information.

        Args:
            images: Image batch tensor, or a batch dict containing "image"
                and optionally "foreground_mask" / "depth".
            masks: Foreground masks (optional; ignored when ``images`` is a dict).
            depths: Depth maps (optional; ignored when ``images`` is a dict).

        Returns:
            ``(scores, segmentations)`` — lists of per-image anomaly scores
            and per-image segmentation maps.
        """
        if isinstance(images, dict):
            batch = images
            images = batch["image"].to(torch.float).to(self.device)
            masks = batch.get("foreground_mask", None)
            depths = batch.get("depth", None)
        else:
            images = images.to(torch.float).to(self.device)
        if masks is not None:
            masks = masks.to(torch.float).to(self.device)
        if depths is not None:
            depths = depths.to(torch.float).to(self.device)

        _ = self.forward_modules.eval()

        batchsize = images.shape[0]
        with torch.no_grad():
            features, patch_shapes = self._embed(
                images, mask=masks, depth=depths, provide_patch_shapes=True
            )
            features = np.asarray(features)

            patch_scores = image_scores = self.anomaly_scorer.predict([features])[0]
            image_scores = self.patch_maker.unpatch_scores(
                image_scores, batchsize=batchsize
            )
            image_scores = image_scores.reshape(*image_scores.shape[:2], -1)

            scales = patch_shapes[0]

            # Downsample the foreground mask to the patch grid once; it is
            # reused for both image-level and pixel-level masking.
            # BUGFIX: previously the mask was flattened at full input
            # resolution and multiplied against the per-patch score vector,
            # which broadcast two incompatible shapes into a
            # (num_patches, H*W) matrix instead of masking per-patch scores.
            binary_masks = None
            if masks is not None and masks.dim() == 4:  # batched (B, 1, H, W)
                resized_masks = F.interpolate(
                    masks,
                    size=(scales[0], scales[1]),
                    mode="bilinear",
                    align_corners=False,
                )
                # Binarize: values above 0.5 count as foreground.
                binary_masks = (resized_masks.cpu().numpy() > 0.5).astype(float)

            if binary_masks is not None:
                for i in range(batchsize):
                    flat_mask = binary_masks[i].reshape(-1)
                    foreground = np.nonzero(flat_mask)[0]
                    if len(foreground) > 0:
                        # Image-level score: mean anomaly score over
                        # foreground patches only.
                        image_scores[i] = (
                            image_scores[i].reshape(-1)[foreground].mean()
                        )

            image_scores = self.patch_maker.score(image_scores)

            patch_scores = self.patch_maker.unpatch_scores(
                patch_scores, batchsize=batchsize
            )
            patch_scores = patch_scores.reshape(batchsize, scales[0], scales[1])

            # Zero out anomaly scores outside the foreground before the
            # segmentation upsampling.  Uses the same bilinearly-resized
            # mask as above (replaces the previous ad-hoc skimage resize,
            # removing that in-function third-party import).
            if binary_masks is not None:
                for i in range(batchsize):
                    patch_scores[i] = patch_scores[i] * binary_masks[i].reshape(
                        scales[0], scales[1]
                    )

            segmentations = self.anomaly_segmentor.convert_to_segmentation(patch_scores)

        return [score for score in image_scores], [mask for mask in segmentations]

    def predict(self, data):
        """Predict anomaly scores, supporting foreground masks and depth maps."""
        if isinstance(data, torch.utils.data.DataLoader):
            return self._predict_dataloader(data)
        return self._predict(data)

    def _predict_dataloader(self, dataloader):
        """Run inference over a full DataLoader, mask- and depth-aware.

        Returns:
            ``(scores, masks, labels_gt, masks_gt)`` — predicted per-image
            scores and segmentation maps, plus ground-truth labels and
            masks collected from dict batches.
        """
        _ = self.forward_modules.eval()

        scores = []
        masks = []
        labels_gt = []
        masks_gt = []
        with tqdm.tqdm(dataloader, desc="Inferring...", leave=False) as data_iterator:
            for batch in data_iterator:
                if isinstance(batch, dict):
                    labels_gt.extend(batch["is_anomaly"].cpu().numpy().tolist())
                    masks_gt.extend(batch["mask"].cpu().numpy().tolist())

                    # Pass the whole dict through so _predict can pick up
                    # the foreground mask and depth channels.
                    _scores, _masks = self._predict(batch)
                else:
                    _scores, _masks = self._predict(batch)

                for score, mask in zip(_scores, _masks):
                    scores.append(score)
                    masks.append(mask)

        return scores, masks, labels_gt, masks_gt