# Copyright (c) Meta Platforms, Inc. All Rights Reserved

import os
import numpy as np
from PIL import Image

import torch
import torchvision
import imageio
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.utilities.distributed import rank_zero_only

from vq_gan_3d.utils import save_video_grid


class ImageLogger(Callback):
    """Periodically renders image outputs of the LightningModule's
    ``log_images`` hook into a grid and saves them as PNGs under
    ``<save_dir>/images/<split>/`` (rank-zero process only)."""

    def __init__(self, batch_frequency, max_images, clamp=True, increase_log_steps=True):
        """
        Args:
            batch_frequency: log every ``batch_frequency`` batches.
            max_images: maximum number of samples per key to render.
            clamp: clamp pixel values to [0, 255] before the uint8 cast.
            increase_log_steps: additionally log at powers of two up to
                ``batch_frequency`` (denser logging early in training).
        """
        super().__init__()
        self.batch_freq = batch_frequency
        self.max_images = max_images
        self.log_steps = [
            2 ** n for n in range(int(np.log2(self.batch_freq)) + 1)]
        if not increase_log_steps:
            self.log_steps = [self.batch_freq]
        self.clamp = clamp

    @rank_zero_only
    def log_local(self, save_dir, split, images,
                  global_step, current_epoch, batch_idx):
        """Save each tensor in ``images`` as a PNG grid.

        Assumes values are in [-1, 1] (model output range) — TODO confirm
        against the module's ``log_images`` implementation.
        """
        root = os.path.join(save_dir, "images", split)
        for k in images:
            # Map from [-1, 1] to [0, 255].
            images[k] = (images[k] + 1.0) * 127.5
            # BUGFIX: torch.clamp is not in-place; the original call discarded
            # its result, so out-of-range values wrapped around in the uint8
            # cast below. Also honor the `clamp` flag stored in __init__,
            # which was previously never consulted.
            if self.clamp:
                images[k] = torch.clamp(images[k], 0, 255)
            grid = torchvision.utils.make_grid(images[k], nrow=4)
            # (C, H, W) -> (H, W, C); drop a trailing singleton channel.
            grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
            grid = grid.numpy().astype(np.uint8)
            filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(
                k,
                global_step,
                current_epoch,
                batch_idx)
            path = os.path.join(root, filename)
            os.makedirs(os.path.split(path)[0], exist_ok=True)
            Image.fromarray(grid).save(path)

    def log_img(self, pl_module, batch, batch_idx, split="train"):
        """Collect images from ``pl_module.log_images`` (in eval mode, no grad)
        and persist them via :meth:`log_local`."""
        if (self.check_frequency(batch_idx) and
                hasattr(pl_module, "log_images") and
                callable(pl_module.log_images) and
                self.max_images > 0):
            is_train = pl_module.training
            if is_train:
                pl_module.eval()

            with torch.no_grad():
                # NOTE(review): autocast is hard-wired to "npu" devices here;
                # confirm this matches the training hardware.
                with torch.autocast(device_type="npu"):
                    images = pl_module.log_images(batch, split=split)

            for k in images:
                N = min(images[k].shape[0], self.max_images)
                images[k] = images[k][:N]
                if isinstance(images[k], torch.Tensor):
                    images[k] = images[k].detach().cpu()
                    # AMP can yield reduced-precision tensors; promote to
                    # float32 so numpy/PIL handling downstream works.
                    if images[k].dtype in (torch.float16, torch.bfloat16):
                        images[k] = images[k].float()

            self.log_local(pl_module.logger.save_dir, split, images,
                           pl_module.global_step, pl_module.current_epoch, batch_idx)

            if is_train:
                pl_module.train()

    def check_frequency(self, batch_idx):
        """Return True when ``batch_idx`` hits the fixed frequency or one of
        the (consumed-once) power-of-two warm-up steps."""
        if (batch_idx % self.batch_freq) == 0 or (batch_idx in self.log_steps):
            try:
                self.log_steps.pop(0)
            except IndexError:
                pass
            return True
        return False

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        self.log_img(pl_module, batch, batch_idx, split="train")

    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        self.log_img(pl_module, batch, batch_idx, split="val")


class VideoLogger(Callback):
    """Periodically collects video outputs of the LightningModule's
    ``log_videos`` hook and saves them as MP4 grids under
    ``<save_dir>/videos/<split>/`` (rank-zero process only)."""

    def __init__(self, batch_frequency, max_videos, clamp=True, increase_log_steps=True):
        """
        Args:
            batch_frequency: log every ``batch_frequency`` batches.
            max_videos: maximum number of samples per key to render.
            clamp: clamp pixel values to [0, 255] before rescaling to [0, 1].
            increase_log_steps: additionally log at powers of two up to
                ``batch_frequency`` (denser logging early in training).
        """
        super().__init__()
        self.batch_freq = batch_frequency
        self.max_videos = max_videos
        self.log_steps = [
            2 ** n for n in range(int(np.log2(self.batch_freq)) + 1)]
        if not increase_log_steps:
            self.log_steps = [self.batch_freq]
        self.clamp = clamp

    @rank_zero_only
    def log_local(self, save_dir, split, videos,
                  global_step, current_epoch, batch_idx):
        """Save each tensor in ``videos`` as an MP4 via ``save_video_grid``.

        Assumes values are in [-1, 1] (model output range) — TODO confirm
        against the module's ``log_videos`` implementation.
        """
        root = os.path.join(save_dir, "videos", split)
        for k in videos:
            # Map from [-1, 1] to [0, 255].
            videos[k] = (videos[k] + 1.0) * 127.5
            # BUGFIX: torch.clamp is not in-place; the original call discarded
            # its result, so values could leave [0, 255] before the rescale
            # below. Also honor the `clamp` flag stored in __init__, which was
            # previously never consulted.
            if self.clamp:
                videos[k] = torch.clamp(videos[k], 0, 255)
            # save_video_grid expects values in [0, 1] — TODO confirm.
            videos[k] = videos[k] / 255.0
            grid = videos[k]
            filename = "{}_gs-{:06}_e-{:06}_b-{:06}.mp4".format(
                k,
                global_step,
                current_epoch,
                batch_idx)
            path = os.path.join(root, filename)
            os.makedirs(os.path.split(path)[0], exist_ok=True)
            try:
                save_video_grid(grid, path)
            except Exception as e:
                # Best-effort: a failed video export should not kill training.
                print(f"无法保存MP4文件 {path}: {e}")

    def log_vid(self, pl_module, batch, batch_idx, split="train"):
        """Collect videos from ``pl_module.log_videos`` (in eval mode, no
        grad) and persist them via :meth:`log_local`."""
        if (self.check_frequency(batch_idx) and
                hasattr(pl_module, "log_videos") and
                callable(pl_module.log_videos) and
                self.max_videos > 0):
            is_train = pl_module.training
            if is_train:
                pl_module.eval()

            with torch.no_grad():
                # NOTE(review): autocast is hard-wired to "npu" devices here;
                # confirm this matches the training hardware.
                with torch.autocast(device_type="npu"):
                    videos = pl_module.log_videos(
                        batch, split=split, batch_idx=batch_idx)

            for k in videos:
                N = min(videos[k].shape[0], self.max_videos)
                videos[k] = videos[k][:N]
                if isinstance(videos[k], torch.Tensor):
                    videos[k] = videos[k].detach().cpu()
                    # AMP can yield reduced-precision tensors; promote to
                    # float32 so downstream export handling works.
                    if videos[k].dtype in (torch.float16, torch.bfloat16):
                        videos[k] = videos[k].float()

            self.log_local(pl_module.logger.save_dir, split, videos,
                           pl_module.global_step, pl_module.current_epoch, batch_idx)

            if is_train:
                pl_module.train()

    def check_frequency(self, batch_idx):
        """Return True when ``batch_idx`` hits the fixed frequency or one of
        the (consumed-once) power-of-two warm-up steps."""
        if (batch_idx % self.batch_freq) == 0 or (batch_idx in self.log_steps):
            try:
                self.log_steps.pop(0)
            except IndexError:
                pass
            return True
        return False

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        self.log_vid(pl_module, batch, batch_idx, split="train")

    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        self.log_vid(pl_module, batch, batch_idx, split="val")

# class VideoLogger(Callback):
#     def __init__(self, batch_frequency, max_videos, clamp=True, increase_log_steps=True):
#         super().__init__()
#         self.batch_freq = batch_frequency
#         self.max_videos = max_videos
#         self.log_steps = [
#             2 ** n for n in range(int(np.log2(self.batch_freq)) + 1)]
#         if not increase_log_steps:
#             self.log_steps = [self.batch_freq]
#         self.clamp = clamp
#
#     @rank_zero_only
#     def log_local(self, save_dir, split, videos,
#                   global_step, current_epoch, batch_idx):
#         root = os.path.join(save_dir, "videos", split)
#
#         for k in videos:
#             # videos[k] 是一个批次数据，形状为 (B, C, D, H, W)
#             batch_tensor = videos[k]
#
#             # 从批次中选择第一个样本进行可视化
#             # new shape: (C, D, H, W)
#             single_volume_tensor = batch_tensor[0]
#
#             # 1. 确保是 float32 类型
#             grid_tensor_float = single_volume_tensor.float()
#             grid_np = grid_tensor_float.numpy()
#             # grid_np = grid_tensor_float.detach().cpu().numpy()
#
#             # 2. 通用的 Min-Max 归一化 (鲁棒性最强)
#             # a. 先计算当前数组的 min 和 max
#             vmin = grid_np.min()
#             vmax = grid_np.max()
#
#             # b. 避免除以零的错误 (如果整个数组是常数)
#             if vmin == vmax:
#                 # 如果所有值都一样，就把它设为一个中间灰色值
#                 grid_np = np.full_like(grid_np, 128)
#             else:
#                 # c. 线性拉伸到 [0, 255] 范围
#                 grid_np = (grid_np - vmin) / (vmax - vmin) * 255.0
#
#             # 3. 转换为 uint8
#             # 经过上一步，可以确保值都在 0-255 范围内
#             grid_np = grid_np.astype(np.uint8)
#
#             # 为4D数组 (C, D, H, W) 提供正确的 transpose 指令，变为 (D, H, W, C)
#             if grid_np.ndim == 4:
#                 grid_np = np.transpose(grid_np, (1, 2, 3, 0))
#             else:
#                 print(f"警告: 预期的4D数组 (C,D,H,W) 未收到，实际维度为 {grid_np.ndim}。跳过此视频。")
#                 continue
#
#             # 修改文件名为 .gif
#             filename = "{}_gs-{:06}_e-{:06}_b-{:06}.gif".format(
#                 k,
#                 global_step,
#                 current_epoch,
#                 batch_idx)
#             path = os.path.join(root, filename)
#             os.makedirs(os.path.split(path)[0], exist_ok=True)
#
#             # 使用 imageio 保存为 GIF
#             try:
#                 # 准备要保存的帧列表
#                 frames_to_save = []
#                 for frame in grid_np:
#                     # frame 的原始形状是 (H, W, C)
#                     # 如果通道数 C 为 1, 将其形状从 (H, W, 1) 压缩为 (H, W)
#                     if frame.shape[2] == 1:
#                         frame = np.squeeze(frame, axis=2)
#
#                     # 现在 frame 的形状应该是 (H, W) 或 (H, W, 3) 等 imageio 支持的格式
#                     frames_to_save.append(frame)
#
#                 # 使用处理过的帧列表来保存 GIF
#                 imageio.mimsave(path, frames_to_save, fps=8)
#
#             except Exception as e:
#                 print(f"无法保存GIF文件 {path}: {e}")
#
#     def log_vid(self, pl_module, batch, batch_idx, split="train"):
#         if (self.check_frequency(batch_idx) and
#                 hasattr(pl_module, "log_videos") and
#                 callable(pl_module.log_videos) and
#                 self.max_videos > 0):
#             logger = type(pl_module.logger)
#
#             is_train = pl_module.training
#             if is_train:
#                 pl_module.eval()
#
#             with torch.no_grad():
#                 with torch.autocast(device_type="npu"):
#                     videos = pl_module.log_videos(
#                         batch, split=split, batch_idx=batch_idx)
#
#             for k in videos:
#                 N = min(videos[k].shape[0], self.max_videos)
#                 videos[k] = videos[k][:N]
#                 if isinstance(videos[k], torch.Tensor):
#                     videos[k] = videos[k].detach().cpu()
#                     if videos[k].dtype == torch.float16:
#                         videos[k] = videos[k].float()
#
#             self.log_local(pl_module.logger.save_dir, split, videos,
#                            pl_module.global_step, pl_module.current_epoch, batch_idx)
#
#             if is_train:
#                 pl_module.train()
#
#     def check_frequency(self, batch_idx):
#         if (batch_idx % self.batch_freq) == 0 or (batch_idx in self.log_steps):
#             try:
#                 self.log_steps.pop(0)
#             except IndexError:
#                 pass
#             return True
#         return False
#
#     def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
#         self.log_vid(pl_module, batch, batch_idx, split="train")
#
#     def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
#         self.log_vid(pl_module, batch, batch_idx, split="val")