import os
import json
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from skimage import measure
from pycocotools import mask as mask_util
import numpy as np
from segment_anything import SamPredictor, sam_model_registry, SamAutomaticMaskGenerator
from PIL import Image
import cv2
from typing import Optional, Union, List, Dict, Tuple
from mmseg.utils.sam_tools import visualize_img_, visualize_3d_score
from mmseg.datasets.utils.region_relate_entropy import RegionRelateEntropyScore
import torch
from tqdm import tqdm

# Per-dataset RGB palettes (6 classes each) used to colorize class-index maps.
# All three datasets share the same 6-color scheme here; presumably the order
# follows the ISPRS label convention — TODO confirm against the dataset loader.
PALETTE_repo = {
    'Potsdam': [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], [255, 255, 0], [255, 0, 0]],
    'PotsdamRGB': [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], [255, 255, 0], [255, 0, 0]],
    'Vaihingen': [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], [255, 255, 0], [255, 0, 0]],
}


def get_viz_active_mask(ori_img, active_mask, dataset='Potsdam'):
    """Overlay the colorized active mask on a whitened copy of the image.

    :param ori_img: Original RGB image, shape [H, W, 3].
    :param active_mask: Active-selection map, shape [H, W, 3]; pixels equal to
        255 are unselected, other values are class indices offset by +1.
    :param dataset: Key into ``PALETTE_repo`` choosing the color palette.
    :return: uint8 visualization, shape [H, W, 3].
    """
    # Blend the original image 50/50 with white to fade unselected regions.
    blended = 0.5 * ori_img + 0.5 * 255 * np.ones_like(ori_img)
    # Colorize class indices (stored +1 in channel 0) with the dataset palette.
    colored = visualize_img_(active_mask[:, :, 0] - 1, palette=PALETTE_repo[dataset])[0]
    # Paint selected pixels (any channel != 255) with their palette color.
    selected = active_mask != 255
    blended[selected] = colored[selected]
    return blended.astype(np.uint8)


def read_img(img_path):
    """Read an image from disk and return it as an RGB numpy array.

    :param img_path: Path to an image file readable by OpenCV.
    :return: uint8 array of shape [H, W, 3] in RGB channel order.
    :raises FileNotFoundError: if the file is missing or cannot be decoded
        (``cv2.imread`` returns ``None`` instead of raising; without this
        check the subsequent ``cvtColor`` fails with a cryptic cv2.error).
    """
    img = cv2.imread(img_path)
    if img is None:
        raise FileNotFoundError(f"Failed to read image: {img_path}")
    # OpenCV loads BGR; the rest of the pipeline expects RGB.
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)


def random_mask(active_map: np.ndarray, mask_percentage: float = 0.2):
    """
    Randomly mask a percentage of points in the active map by setting them to 255.

    A point is eligible only if *all* of its channels differ from 255.
    The array is modified in place and also returned.

    :param active_map: The active map to modify. Shape is [H, W, 3].
    :param mask_percentage: The fraction of eligible points to set to 255.
    :return: The modified active map (same object as the input).
    """
    # Coordinates of pixels that are not 255 in every channel (R, G, B).
    ys, xs = np.where(np.all(active_map != 255, axis=-1))
    total_valid_points = ys.size

    # Number of eligible points to mask (truncated, matching int()).
    num_mask_points = int(total_valid_points * mask_percentage)

    if num_mask_points > 0:
        # Vectorized selection + fancy-index assignment replaces the original
        # Python-level loop over (y, x) pairs; 255 broadcasts to all channels.
        chosen = np.random.choice(total_valid_points, num_mask_points, replace=False)
        active_map[ys[chosen], xs[chosen]] = 255

    return active_map

if __name__ == '__main__':
    # Compute the mean impurity score over the actively-selected pixels of each
    # training image, save a visualization per image, and dump per-image plus
    # overall statistics to a JSON file.

    img_dir = '/data/yrz/repos/ST-DASegNet/data/Vaihingen_IRRG_DA/img_dir' + '/train'
    ann_dir = r'/data/yrz/repos/ST-DASegNet/data/Vaihingen_IRRG_DA/ann_dir' + '/train'
    checkpoint_dir = r'./checkpoints/deeplabv3plus/Potsdam2Vaihingen_results_ada/deeplabv3plus_r50-d8_4x4_512x512_40k_ADA_0.022_40_normalize2_region_relate_entropy&impurity/'
    # Alternative experiment checkpoints:
    # checkpoint_dir = r'/data/yrz/repos/ST-DASegNet/checkpoints/deeplabv3plus/Potsdam2Vaihingen_results_ada/deeplabv3plus_r50-d8_4x4_512x512_40k_ADA_0.022_40_random'
    # checkpoint_dir = r'/data/yrz/repos/ST-DASegNet/checkpoints/deeplabv3plus/Potsdam2Vaihingen_results_ada/deeplabv3plus_r50-d8_4x4_512x512_40k_ADA_test_0.022_40_pixel_entropy'
    # checkpoint_dir = r'/data/yrz/repos/ST-DASegNet/checkpoints/deeplabv3plus/Potsdam2Vaihingen_results_ada/deeplabv3plus_r50-d8_4x4_512x512_40k_ADA_test_0.022_40_impurity'
    # checkpoint_dir = r'/data/yrz/repos/ST-DASegNet/checkpoints/deeplabv3plus/Potsdam2Vaihingen_results_ada/deeplabv3plus_r50-d8_4x4_512x512_40k_ADA_test_0.022_40_region_relate_entropy'
    # checkpoint_dir = r'/data/yrz/repos/ST-DASegNet/checkpoints/deeplabv3plus/Potsdam2Vaihingen_results_ada/deeplabv3plus_r50-d8_4x4_512x512_40k_ADA_test_0.022_40_region_relate_entropy&impurity'
    # checkpoint_dir = r'/data/yrz/repos/ST-DASegNet/checkpoints/deeplabv3plus/Potsdam2Vaihingen_results_ada/deeplabv3plus_r50-d8_4x4_512x512_40k_ADA_0.022_40_region_relate_entropy_easy'
    # checkpoint_dir = r'./checkpoints/deeplabv3plus/Potsdam2Vaihingen_results_ada/deeplabv3plus_r50-d8_4x4_512x512_40k_ADA_0.022_40_normalize2_region_relate_entropy&impurity_k=3/'
    # checkpoint_dir = r'./checkpoints/deeplabv3plus/Potsdam2Vaihingen_results_ada/deeplabv3plus_r50-d8_4x4_512x512_40k_ADA_0.022_40_normalize2_region_relate_entropy&impurity_k=5/'
    # checkpoint_dir = r'./checkpoints/deeplabv3plus/Potsdam2Vaihingen_results_ada/deeplabv3plus_r50-d8_4x4_512x512_40k_ADA_0.022_40_normalize2_region_relate_entropy&impurity_k=7/'
    # checkpoint_dir = r'./checkpoints/deeplabv3plus/Potsdam2Vaihingen_results_ada/deeplabv3plus_r50-d8_4x4_512x512_40k_ADA_0.022_40_normalize2_region_relate_entropy&impurity_k=9/'

    # Fraction of the original active selection to keep (the complement is
    # randomly re-masked below). Other tried values: 0.25, 0.5, 1.0.
    select_ratio = 0.75
    output_viz_dir = os.path.join(checkpoint_dir, f'viz_active_map_ratio={select_ratio}')
    output_json = os.path.join(checkpoint_dir, f'impurity_scores_ratio={select_ratio}.json')
    # Ensure the visualization directory exists before writing into it.
    os.makedirs(output_viz_dir, exist_ok=True)

    # Per-image results and the running list of per-image mean scores.
    results = {}
    all_impurity_scores = []

    # NOTE(review): 'avtive_mask' is the (misspelled) directory name produced
    # upstream — keep as-is so existing checkpoints remain readable.
    active_mask_dir = os.path.join(checkpoint_dir, 'avtive_mask')
    file_list = os.listdir(active_mask_dir)
    active_fn = RegionRelateEntropyScore(in_channels=7, size=9).cuda()

    for file_name in tqdm(file_list):
        # Each active-mask file must have a matching image and annotation.
        file_path = os.path.join(active_mask_dir, file_name)
        img_path = os.path.join(img_dir, file_name)
        ann_path = os.path.join(ann_dir, file_name)
        for path in (file_path, img_path, ann_path):
            assert os.path.exists(path), f'missing file: {path}'

        # Read active map, image and annotation; drop part of the selection.
        active_map, img, ann = map(read_img, [file_path, img_path, ann_path])
        active_map = random_mask(active_map, 1 - select_ratio)
        viz_res = get_viz_active_mask(img, active_map)
        visualize_img_(viz_res, save_path=os.path.join(output_viz_dir, file_name))

        # Convert annotation to tensor and compute the impurity score map.
        # Fixed: original chained .cuda().cpu() — a redundant device round trip.
        ann = torch.tensor(ann[:, :, 0], dtype=torch.int64).cuda()
        impurity_score = active_fn.cal_impurity_by_predict(ann).cpu().numpy()

        # Mean impurity over the pixels that remain actively selected;
        # cast to a plain Python float so json.dump stays clean.
        active_score = impurity_score[active_map[:, :, 0] != 255]
        mean_score = float(np.mean(active_score))

        results[file_name] = {
            "impurity_score": mean_score,
            "image_path": img_path,
            "annotation_path": ann_path
        }
        all_impurity_scores.append(mean_score)

        print(f"img: {file_name} Impurity score: {mean_score}")

    # Overall average; guard against an empty directory (np.mean([]) is nan).
    overall_average_score = float(np.mean(all_impurity_scores)) if all_impurity_scores else 0.0

    overall_stats = {
        "average_impurity_score": overall_average_score,
        "total_images": len(file_list),
        "image_results": results
    }

    with open(output_json, "w") as json_file:
        json.dump(overall_stats, json_file, indent=4)

    # Fixed: original printed a hard-coded filename that did not match output_json.
    print(f"Results saved to {output_json}")
