from pathlib import Path
from typing import Dict
import numpy as np
import cv2
import torch
import mediapy as media
import sys
sys.path.append("..")
from autoencoder.model import Autoencoder
from apply_clip import OpenCLIPNetwork,OpenCLIPNetworkConfig
import glob
from natsort import natsorted
from eval import colormaps
import os

# Mode (local-majority) filtering operation
def smooth(mask, scale=3):
    """Denoise an integer mask with a local mode (majority) filter.

    Each pixel is replaced by the most frequent value inside a square
    window of half-width `scale` centered on it, clipped at the image
    borders.

    Args:
        mask: 2-D array of non-negative integers (e.g. a 0/1 mask);
            values must be valid input for np.bincount.
        scale: window half-width; the full window is (2*scale+1) squared.
            Defaults to 3 (a 7x7 window), matching the original behavior.

    Returns:
        Array of the same shape and dtype with the mode filter applied.
    """
    h, w = mask.shape[:2]
    im_smooth = mask.copy()
    for i in range(h):
        for j in range(w):
            # Clip the window to the image bounds. Slice ends are
            # exclusive, so the upper bounds are h and w — the original
            # used h-1 / w-1, silently dropping the last row and column
            # from every border window.
            square = mask[max(0, i - scale) : min(i + scale + 1, h),
                          max(0, j - scale) : min(j + scale + 1, w)]
            # bincount + argmax == mode of the window (ties -> smallest value)
            im_smooth[i, j] = np.argmax(np.bincount(square.reshape(-1)))
    return im_smooth

def colormap_saving(image: torch.Tensor, colormap_options, save_path):
    """Colorize `image` with `colormap_options` and optionally save it as PNG.

    Per colormaps.apply_colormap:
    - shape (h, w, 1): rendered as a colored relevance map;
    - shape (h, w, 3): returned directly (already RGB);
    - shape (h, w, c): reduced to 3 channels via PCA.

    Returns:
        The colorized image as a numpy array on the CPU.
    """
    colored = colormaps.apply_colormap(image=image, colormap_options=colormap_options)
    output_image = colored.cpu().numpy()
    if save_path is not None:
        media.write_image(save_path.with_suffix(".png"), output_image, fmt="png")
    return output_image

def vis_mask_save(mask, save_path: Path = None):
    """Save a binary mask as an 8-bit image, mapping value 1 to 255.

    Args:
        mask: numpy array containing 0/1 values.
        save_path: destination file path. If None, nothing is written —
            the original dereferenced `save_path.parent` unconditionally
            and crashed on the default argument.
    """
    if save_path is None:
        return
    mask_save = mask.copy()
    # turn foreground (1) into white so the mask is visible as an image
    mask_save[mask == 1] = 255
    save_path.parent.mkdir(exist_ok=True, parents=True)
    cv2.imwrite(str(save_path), mask_save)

def activate_stream(sem_map, 
                    image, 
                    clip_model, 
                    image_name: Path = None,
                    img_ann: Dict = None, 
                    thresh : float = 0.5, 
                    colormap_options = None):
    """Visualize per-prompt relevance maps and pick the best scale level.

    For every text prompt (positive) in `clip_model`, saves a heatmap and a
    composited overlay per head/level under `image_name`, binarizes the
    relevance into a mask, and selects the level with the highest relevance
    peak.

    Args:
        sem_map: semantic feature map passed to clip_model.get_max_across.
        image: image tensor indexed as image[mask, :] — presumably (h, w, 3)
            RGB in [0, 1]; TODO confirm against caller.
        clip_model: CLIP wrapper exposing get_max_across and .positives.
        image_name: output directory (a Path) for the saved visualizations.
        img_ann: per-prompt ground-truth annotations; currently unused
            because the IoU computation below is commented out.
        thresh: binarization threshold on the normalized relevance map.
        colormap_options: forwarded to colormap_saving.

    Returns:
        (chosen_iou_list, chosen_lvl_list). NOTE: IoUs are all zeros while
        the ground-truth comparison is disabled.
    """
    # valid_map [1, n_phrases, h, w]
    valid_map = clip_model.get_max_across(sem_map) 
    n_head, n_prompt, h, w = valid_map.shape

    chosen_iou_list, chosen_lvl_list = [], []
    for k in range(n_prompt):
        iou_lvl = np.zeros(n_head)
        mask_lvl = np.zeros((n_head, h, w))
        for i in range(n_head):
            # NOTE: smooth the relevance map before looking for maxima —
            # blend a box-filtered copy 50/50 with the raw map (in place).
            scale = 30 # box-filter kernel size
            kernel = np.ones((scale,scale)) / (scale**2)
            np_relev = valid_map[i][k].cpu().numpy()
            avg_filtered = cv2.filter2D(np_relev, -1, kernel)
            avg_filtered = torch.from_numpy(avg_filtered).to(valid_map.device)
            valid_map[i][k] = 0.5 * (avg_filtered + valid_map[i][k])
            # save the relevance heatmap
            output_path_relev = image_name / 'heatmap' / f'{clip_model.positives[k]}_{i}'
            output_path_relev.parent.mkdir(exist_ok=True, parents=True)
            colormap_saving(valid_map[i][k].unsqueeze(-1), colormap_options,
                            output_path_relev)
            
            # NOTE: consistent with LeRF, relevance below 0.5 is background.
            # Clip the relevance: values below 0.5 become 0.
            p_i = torch.clip(valid_map[i][k] - 0.5, 0, 1).unsqueeze(-1)
            # normalize the relevance for visualization
            valid_composited = colormaps.apply_colormap(p_i / (p_i.max() + 1e-6), colormaps.ColormapOptions("turbo"))
            # regions with relevance below 0.5 are treated as background
            mask = (valid_map[i][k] < 0.5).squeeze()
            # background regions rendered at 30% of the original brightness
            valid_composited[mask, :] = image[mask, :] * 0.3
            output_path_compo = image_name / 'composited' / f'{clip_model.positives[k]}_{i}'
            output_path_compo.parent.mkdir(exist_ok=True, parents=True)
            colormap_saving(valid_composited, colormap_options, output_path_compo)

            # truncate the heatmap into a mask: min-max normalize to [0, 1],
            # rescale to [-1, 1], then clip back to [0, 1] — effectively
            # zeroing the lower half of the dynamic range before thresholding
            output = valid_map[i][k]
            output = output - torch.min(output)
            output = output / (torch.max(output) + 1e-9)
            output = output * (1.0 - (-1.0)) + (-1.0)
            output = torch.clip(output, 0, 1)
            # binarize, then denoise with the local-mode filter
            mask_pred = (output.cpu().numpy() > thresh).astype(np.uint8)
            mask_pred = smooth(mask_pred)
            mask_lvl[i] = mask_pred
            #mask_gt = img_ann[clip_model.positives[k]]['mask'].astype(np.uint8)

            # calculate iou (disabled — no ground truth; iou_lvl stays zero)
            #intersection = np.sum(np.logical_and(mask_gt, mask_pred))
            #union = np.sum(np.logical_or(mask_gt, mask_pred))
            #iou = np.sum(intersection) / np.sum(union)
            #iou_lvl[i] = iou
        
        # choose the head/level whose (smoothed) relevance peak is highest
        score_lvl = torch.zeros((n_head,), device=valid_map.device)
        for i in range(n_head):
            score = valid_map[i, k].max()
            score_lvl[i] = score
        chosen_lvl = torch.argmax(score_lvl)
        chosen_iou_list.append(iou_lvl[chosen_lvl])
        chosen_lvl_list.append(chosen_lvl.cpu().numpy())
        
        # save the chosen level's mask for visualization
        save_path = image_name / f'chosen_{clip_model.positives[k]}.png'
        vis_mask_save(mask_lvl[chosen_lvl], save_path)

    return chosen_iou_list, chosen_lvl_list

def evaluate(output_path, ae_ckpt_path, encoder_hidden_dims, decoder_hidden_dims, mask_thresh,
             label_list=None):
    """Run open-vocabulary relevance evaluation over rendered frames.

    For every compressed semantic map in `{output_path}/seg_map`, loads the
    matching RGB frame from `{output_path}/rgb`, decodes the compressed
    features back to CLIP space with the autoencoder, and lets
    activate_stream write heatmap/composite/mask visualizations under
    `{output_path}/eval_result/<frame>`.

    Args:
        output_path: directory with 'seg_map/semantic_map*.npy' and 'rgb/*.png'.
        ae_ckpt_path: path to the autoencoder state_dict checkpoint.
        encoder_hidden_dims: encoder layer widths for Autoencoder.
        decoder_hidden_dims: decoder layer widths for Autoencoder.
        mask_thresh: binarization threshold forwarded to activate_stream.
        label_list: text prompts to query; defaults to ['windows']
            (the previously hard-coded value), so existing callers behave
            identically.

    Returns:
        (chosen_iou_all, chosen_lvl_list) accumulated over all frames.
        Previously these were collected but never returned.
    """
    if label_list is None:
        label_list = ['windows']

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    colormap_options = colormaps.ColormapOptions(
        colormap="turbo",
        normalize=True,
        colormap_min=-1.0,
        colormap_max=1.0,
    )

    # The prompts are fixed for the whole run, so encode them once here
    # instead of re-calling set_positives for every frame.
    clip_model = OpenCLIPNetwork(OpenCLIPNetworkConfig)
    clip_model.set_positives(label_list)

    # Autoencoder that restores compressed semantic features to CLIP space.
    checkpoint = torch.load(ae_ckpt_path, map_location=device)
    model = Autoencoder(encoder_hidden_dims, decoder_hidden_dims).to(device)
    model.load_state_dict(checkpoint)
    model.eval()

    compressed_sem_feat_path = natsorted(glob.glob(f'{output_path}/seg_map/semantic_map*.npy'))
    images_path = natsorted(glob.glob(f'{output_path}/rgb/*.png'))
    output_result = os.path.join(output_path, 'eval_result')
    os.makedirs(output_result, exist_ok=True)

    chosen_iou_all, chosen_lvl_list = [], []
    for compressed_sem_feat, image_path in zip(compressed_sem_feat_path, images_path):
        # Per-frame output directory named after the RGB file's stem
        # (portable replacement for splitting the path on '/').
        frame_dir = Path(output_result) / Path(image_path).stem
        frame_dir.mkdir(exist_ok=True, parents=True)

        # Move the leading axis of the saved array to the channel-last
        # position: axes (0,1,2) -> (1,2,0).
        sem_feat = torch.from_numpy(np.load(compressed_sem_feat)).float().permute(1, 2, 0).to(device)

        # cv2 loads BGR; [..., ::-1] flips to RGB, then scale to [0, 1]
        image = cv2.imread(image_path)[..., ::-1]
        image = (image / 255.0).astype(np.float32)
        image = torch.from_numpy(image).to(device)

        with torch.no_grad():
            h, w, _ = sem_feat.shape
            # decode per-pixel features, then restore the spatial layout
            restored_feat = model.decode(sem_feat.flatten(0, 1)).view(h, w, -1)

        c_iou_list, c_lvl = activate_stream(restored_feat, image, clip_model, frame_dir,
                                            thresh=mask_thresh, colormap_options=colormap_options)
        chosen_iou_all.extend(c_iou_list)
        chosen_lvl_list.extend(c_lvl)

    return chosen_iou_all, chosen_lvl_list


if __name__ == "__main__":
    # Machine-specific experiment configuration.
    scene_output_dir = '/mnt/c/Users/cyt/Downloads'
    autoencoder_ckpt = '../../SemGS_SLAM/src/network/autoencoder/ckpt/ae_ckpt/best_ckpt_3.pth'
    # Autoencoder layer widths: encoder ends at 3 dims, decoder ends at 512.
    enc_dims = [256, 128, 64, 32, 16, 3]
    dec_dims = [16, 32, 64, 128, 256, 512]
    evaluate(scene_output_dir, autoencoder_ckpt, enc_dims, dec_dims, 0.5)