import csv
import logging
import os
import random

import matplotlib.pyplot as plt
import numpy as np
import PIL
import torch
import tqdm
import pandas as pd  # used for writing anomaly-score results to Excel

LOGGER = logging.getLogger(__name__)


def _export_dataloader_images(dataloader, image_path, original_folder, masked_folder):
    """Locate `image_path` in `dataloader` and save its de-normalized images.

    Saves the de-normalized "original_image" (if the batch provides one) into
    `original_folder` and the de-normalized "image" into `masked_folder`,
    using the basename of `image_path` as the filename.

    Args:
        dataloader: [DataLoader] Loader whose batches are dicts containing
            "image_path", "image" and optionally "original_image". Its
            dataset must expose `transform_std` / `transform_mean`.
        image_path: [str] Path identifying the sample to export.
        original_folder: [str] Target folder for the original image.
        masked_folder: [str] Target folder for the masked image.
    """
    # Compute the de-normalization constants once, unconditionally. The
    # previous code only defined them inside the "original_image" branch,
    # causing a NameError for the masked image when that key was absent.
    in_std = np.array(dataloader.dataset.transform_std).reshape(-1, 1, 1)
    in_mean = np.array(dataloader.dataset.transform_mean).reshape(-1, 1, 1)
    filename = os.path.basename(image_path)

    def _denormalize(tensor):
        # Normalized CHW float tensor -> HWC uint8 image array.
        array = np.clip((tensor.cpu().numpy() * in_std + in_mean) * 255, 0, 255)
        return array.astype(np.uint8).transpose(1, 2, 0)

    for batch in dataloader:
        for j, path in enumerate(batch["image_path"]):
            if path != image_path:
                continue
            if "original_image" in batch:
                original = _denormalize(batch["original_image"][j])
                PIL.Image.fromarray(original).save(
                    os.path.join(original_folder, filename)
                )
            masked = _denormalize(batch["image"][j])
            PIL.Image.fromarray(masked).save(os.path.join(masked_folder, filename))
            # Stop scanning once the sample is exported. The previous code
            # only broke the inner loop and kept iterating remaining batches.
            return


def plot_segmentation_images(
        savefolder,
        image_paths,
        segmentations,
        anomaly_scores=None,
        mask_paths=None,
        image_transform=lambda x: x,
        mask_transform=lambda x: x,
        save_depth=4,
        dataloader=None,  # optional source of original / masked image tensors
):
    """Generate and save anomaly segmentation figures plus an Excel summary.

    Args:
        savefolder: [str] Directory in which the result figures are stored.
        image_paths: List[str] List of paths to images.
        segmentations: [List[np.ndarray]] Generated anomaly segmentations.
        anomaly_scores: [List[float]] Anomaly scores for each image.
        mask_paths: [List[str]] List of paths to ground truth masks.
        image_transform: [function or lambda] Optional transformation of images.
        mask_transform: [function or lambda] Optional transformation of masks.
        save_depth: [int] Number of path-strings to use for image savenames.
        dataloader: [DataLoader] Optional dataloader for obtaining masked images.
    """
    masks_provided = mask_paths is not None
    if anomaly_scores is None:
        # Use a numeric placeholder: the title below formats with "{:.2f}",
        # which raised TypeError with the previous string "-1" placeholder.
        anomaly_scores = [-1.0] * len(image_paths)

    os.makedirs(savefolder, exist_ok=True)

    # Folders (next to savefolder) for the exported original / masked images.
    masked_images_folder = os.path.join(os.path.dirname(savefolder), "re", "mask")
    original_images_folder = os.path.join(os.path.dirname(savefolder), "re", "original")
    os.makedirs(masked_images_folder, exist_ok=True)
    os.makedirs(original_images_folder, exist_ok=True)

    # Per-image anomaly-score records, written to Excel at the end.
    records = []

    for i, (image_path, segmentation) in enumerate(zip(image_paths, segmentations)):
        anomaly_score = anomaly_scores[i]
        mask_path = mask_paths[i] if mask_paths is not None else None

        image = PIL.Image.open(image_path).convert("RGB")
        image = image_transform(image)
        if not isinstance(image, np.ndarray):
            image = image.numpy()

        if masks_provided and mask_path is not None:
            mask = PIL.Image.open(mask_path).convert("RGB")
            mask = mask_transform(mask)
            if not isinstance(mask, np.ndarray):
                mask = mask.numpy()
        else:
            mask = np.zeros_like(image)

        savename = image_path.split("/")
        savename = "_".join(savename[-save_depth:])
        savename = os.path.join(savefolder, savename)

        # Two subplots when there is no ground-truth mask, three otherwise.
        num_subplots = 3 if masks_provided else 2
        f, axes = plt.subplots(1, num_subplots)

        # Images are CHW; transpose to HWC for matplotlib.
        axes[0].imshow(image.transpose(1, 2, 0))
        axes[0].set_title("Original")

        if masks_provided:
            axes[1].imshow(mask.transpose(1, 2, 0))
            axes[1].set_title("GT Mask")
            axes[2].imshow(segmentation)
            axes[2].set_title("Prediction")
        else:
            axes[1].imshow(segmentation)
            axes[1].set_title("Prediction")

        f.set_size_inches(3 * num_subplots, 3)
        f.tight_layout()

        # Reduce 0-d arrays/tensors to a plain Python scalar for formatting.
        if isinstance(anomaly_score, (np.ndarray, torch.Tensor)):
            anomaly_score = (
                anomaly_score.item()
                if hasattr(anomaly_score, "item")
                else float(anomaly_score)
            )

        # English title to avoid missing-glyph (font) issues.
        plt.suptitle(f"Anomaly Score: {anomaly_score:.2f}")

        f.savefig(savename)
        plt.close()

        records.append({"Image File Name": savename, "Anomaly Score": anomaly_score})

        # Optionally export the de-normalized original and masked images.
        if dataloader is not None:
            _export_dataloader_images(
                dataloader, image_path, original_images_folder, masked_images_folder
            )

    # Persist all anomaly scores as one Excel sheet inside savefolder
    # (previously written to a hard-coded absolute path on one machine).
    if records:
        results_file = os.path.join(savefolder, "Detection_Results.xlsx")
        pd.DataFrame(records).to_excel(results_file, index=False)
        print(f"Anomaly scores saved to {results_file}")


def create_storage_folder(
        main_folder_path, project_folder, group_folder, mode="iterate"
):
    """Create and return a run-specific storage directory.

    The layout is `main_folder_path/project_folder/group_folder`. In
    "iterate" mode, a numeric suffix `_<counter>` is appended until an
    unused folder name is found; in "overwrite" mode an existing group
    folder is reused as-is.

    Args:
        main_folder_path: [str] Root results directory.
        project_folder: [str] Project-level subdirectory name.
        group_folder: [str] Group/run-level subdirectory name.
        mode: [str] Either "iterate" or "overwrite".

    Returns:
        [str] Path to the storage folder.
    """
    project_path = os.path.join(main_folder_path, project_folder)
    # Creates main_folder_path as well if it does not exist yet.
    os.makedirs(project_path, exist_ok=True)

    save_path = os.path.join(project_path, group_folder)
    if mode == "iterate":
        suffix = 0
        while os.path.exists(save_path):
            save_path = os.path.join(project_path, f"{group_folder}_{suffix}")
            suffix += 1
        os.makedirs(save_path)
    elif mode == "overwrite":
        os.makedirs(save_path, exist_ok=True)

    return save_path


def set_torch_device(gpu_ids):
    """Returns correct torch.device.

    Args:
        gpu_ids: [list] list of gpu ids. If empty, cpu is used.
    """
    # Only the first listed GPU is used; an empty list selects the CPU.
    if not gpu_ids:
        return torch.device("cpu")
    return torch.device(f"cuda:{gpu_ids[0]}")


def fix_seeds(seed, with_torch=True, with_cuda=True):
    """Seed all relevant RNGs for reproducibility.

    Args:
        seed: [int] Seed value.
        with_torch: Flag. If true, torch-related seeds are fixed.
        with_cuda: Flag. If true, torch+cuda-related seeds are fixed
    """
    # Python's and NumPy's global RNGs are always seeded.
    for seeder in (random.seed, np.random.seed):
        seeder(seed)
    if with_torch:
        torch.manual_seed(seed)
    if with_cuda:
        for cuda_seeder in (torch.cuda.manual_seed, torch.cuda.manual_seed_all):
            cuda_seeder(seed)
        # Force deterministic cuDNN kernels for reproducible results.
        torch.backends.cudnn.deterministic = True


def compute_and_store_final_results(
        results_path,
        results,
        row_names=None,
        column_names=(
            "Instance AUROC",
            "Full Pixel AUROC",
            "Full PRO",
            "Anomaly Pixel AUROC",
            "Anomaly PRO",
        ),
):
    """Store computed results as CSV file and return per-metric means.

    Args:
        results_path: [str] Where to store result csv.
        results: [List[List]] List of lists containing results per dataset,
                 one numeric entry per name in `column_names`.
        row_names: [List[str] or None] Optional per-row dataset names.
        column_names: [Sequence[str]] Metric names, one per result column.
            (Now a tuple: the previous list default was a shared mutable
            default argument.)

    Returns:
        [dict] Mapping "mean_<column name>" -> mean value across all rows.
    """
    if row_names is not None:
        assert len(row_names) == len(results), "#Rownames != #Result-rows."

    mean_metrics = {}
    for i, result_key in enumerate(column_names):
        mean_metrics[result_key] = np.mean([x[i] for x in results])
        LOGGER.info("{0}: {1:3.3f}".format(result_key, mean_metrics[result_key]))

    savename = os.path.join(results_path, "results.csv")
    # newline="" is required by the csv module to avoid blank rows on Windows.
    with open(savename, "w", newline="") as csv_file:
        csv_writer = csv.writer(csv_file, delimiter=",")
        header = list(column_names)
        if row_names is not None:
            header = ["Row Names"] + header
        csv_writer.writerow(header)

        # One row per dataset, optionally prefixed with its name.
        for i, result_list in enumerate(results):
            csv_row = result_list
            if row_names is not None:
                csv_row = [row_names[i]] + list(result_list)
            csv_writer.writerow(csv_row)

        # Final row holds the column means.
        mean_scores = list(mean_metrics.values())
        if row_names is not None:
            mean_scores = ["Mean"] + mean_scores
        csv_writer.writerow(mean_scores)

    return {"mean_{0}".format(key): item for key, item in mean_metrics.items()}
