import logging
import os

import torch
from PIL import Image
from torch.utils.data import DataLoader
from torchvision.transforms import (ToPILImage, ToTensor)

from modelling.datasets import DataConfig, collaters_factory, datasets_factory
from modelling.configs import model_configs_factory
from modelling.models import models_factory
from utils.parser import Parser
from utils.train_inference_utils import get_device, move_batch_to_device

from utils.gradcam_utils import (
    GradCAM,
    show_box_cam_on_image, 
    show_cam_on_image
)

# Restrict CUDA to the first GPU before any device queries happen.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

def unload_frame(save_root, vid, fid, frame):
    """Save a single frame tensor as a JPEG under ``save_root/vid/``.

    Args:
        save_root: Root directory for the saved visualizations.
        vid: Video id, used as the subdirectory name.
        fid: Zero-based frame index; the file is named ``%06d.jpg`` with
            ``fid + 1`` (matching the 1-based naming used by load_frames).
        frame: Image tensor (assumed C x H x W on any device — the
            ToPILImage transform converts it).
    """
    unloader = ToPILImage()
    # Move to CPU and clone so conversion never touches the original tensor.
    image = unloader(frame.cpu().clone())
    out_dir = os.path.join(save_root, vid)
    # exist_ok=True avoids the check-then-create race of the original
    # exists()/makedirs() pair.
    os.makedirs(out_dir, exist_ok=True)
    image.save(os.path.join(out_dir, '%06d.jpg' % (fid + 1)))

def load_frames(root, vids, fids):
    """Load and stack RGB frames from disk for a batch of videos.

    Args:
        root: Directory containing one subdirectory of JPEG frames per video.
        vids: Sequence of video ids (subdirectory names), one per batch item.
        fids: Per-video lists of zero-based frame indices; files on disk are
            named ``%06d.jpg`` with ``fid + 1``.

    Returns:
        Tensor of shape (batch, C, T, H, W); ToTensor scales pixel values
        to [0, 1].
    """
    # One transform instance reused for every frame (hoisted out of the loop).
    to_tensor = ToTensor()
    videos = []
    # zip over (vid, frame list) pairs; the original indexed with ``i`` and
    # then shadowed that same ``i`` in the inner frame loop.
    for vid, frame_ls in zip(vids, fids):
        frames = [
            to_tensor(
                Image.open(
                    os.path.join(root, vid, '%06d.jpg' % (fid + 1))
                ).convert('RGB')
            )
            for fid in frame_ls
        ]
        # (T, C, H, W) -> (C, T, H, W)
        videos.append(torch.stack(frames, dim=0).transpose(0, 1))

    return torch.stack(videos, dim=0)


def visualize(args):
    """Run Grad-CAM on one test sample and save the heatmap frames to disk.

    Builds the test dataset/loader and model from ``args``, loads the model
    checkpoint, computes Grad-CAM heatmaps for a single (shuffled) sample,
    overlays them on the RGB frames, and writes the resulting frames under
    ``args.visualize_path/<video_id>/``.

    Args:
        args: Parsed command-line namespace (see utils.parser.Parser).

    Raises:
        ValueError: If ``args.model_name`` has no Grad-CAM overlay handler.
    """
    # Set up logging
    logging.basicConfig(level=logging.INFO)
    # Check for CUDA
    device = get_device(logger=logging.getLogger(__name__))
    logging.info("Preparing dataset...")
    data_config = DataConfig(
        dataset_name=args.dataset_name,
        dataset_path=args.test_dataset_path,
        labels_path=args.labels_path,
        videoid2size_path=args.videoid2size_path,
        layout_num_frames=args.layout_num_frames,
        appearance_num_frames=args.appearance_num_frames,
        videos_path=args.videos_path,
        train=False,
    )
    test_dataset = datasets_factory[args.dataset_type](data_config)
    num_samples = len(test_dataset)
    logging.info(f"Inference on {num_samples}")
    collater = collaters_factory[args.dataset_type](data_config)
    # Prepare loader; batch_size stays 1 because only one sample is visualized.
    test_loader = DataLoader(
        test_dataset,
        batch_size=1, # set to 1
        collate_fn=collater,
        num_workers=args.num_workers,
        pin_memory=True if args.num_workers else False,
        shuffle=True,
    )
    logging.info("Preparing model...")
    # Prepare model
    num_classes = len(test_dataset.labels)
    model_config = model_configs_factory[args.model_name](
        num_classes=num_classes,
        unique_categories=len(data_config.category2id),
        num_spatial_layers=args.num_spatial_layers,
        num_temporal_layers=args.num_temporal_layers,
        appearance_num_frames=args.appearance_num_frames,
        resnet_model_path=args.resnet_model_path,
    )
    logging.info("==================================")
    logging.info(f"The model's configuration is:\n{model_config}")
    logging.info("==================================")
    model = models_factory[args.model_name](model_config).to(device)
    try:
        model.load_state_dict(torch.load(args.checkpoint_path, map_location=device))
    except RuntimeError as e:
        # Retry non-strictly so checkpoints missing a few modules still load.
        logging.warning(
            "Default loading failed, loading with strict=False. If it's only "
            "score_embedding modules it's ok. Otherwise see exception below"
        )
        logging.warning(e)
        model.load_state_dict(
            torch.load(args.checkpoint_path, map_location=device), strict=False
        )

    # target_layers = ["resnet/layer4/2"]
    target_layers = ["backbone/frames_embeddings/layout_embedding/transformer/layers/2/norm2"]

    cam = GradCAM(model=model, target_layers=target_layers, logit_name=args.model_name)

    # Initialize optionals so a batch missing these keys cannot trigger a
    # NameError below (the original code left them unbound in that case).
    rgb_imgs = []
    boxes = None
    frame_idx = None
    for batch in test_loader:
        batch = move_batch_to_device(batch, device)
        inputs = [batch]
        vids = batch["video_id"]
        labels = batch["labels"]
        if "video_frames" in batch.keys():
            rgb_imgs.append(batch["video_frames"])
        if "boxes" in batch.keys():
            boxes = batch["boxes"]
        if "frame_idx" in batch.keys():
            frame_idx = batch["frame_idx"]

        break  # only test one sample

    # NOTE(review): labels is converted but unused — cam is called with
    # labels=None (class-agnostic CAM); kept for parity with the original.
    labels = labels.to(torch.int64)
    heatmap_ls, _ = cam(inputs=inputs, labels=None)

    if len(rgb_imgs) == 0:
        # The collater did not provide frames in the batch; load them from disk.
        rgb_imgs.append(load_frames(args.videos_path, vids, frame_idx))

    if args.model_name == 'resnet3d':
        result_ls = show_cam_on_image(
            rgb_imgs,
            heatmap_ls,
            data_mean=[0.485, 0.456, 0.406],  # standard ImageNet mean
            data_std=[0.229, 0.224, 0.225],   # standard ImageNet std
        )
    elif args.model_name == 'stlt':
        result_ls = show_box_cam_on_image(rgb_imgs, heatmap_ls, boxes)
    else:
        # Original code fell through here with result_ls unbound (NameError).
        raise ValueError(f"No visualization handler for model {args.model_name!r}")

    # Each result is (B, T, C, H, W); dump every frame of every clip.
    for result in result_ls:
        B, T, _, _, _ = result.size()
        for i in range(B):
            clip = result[i]
            for j in range(T):
                unload_frame(args.visualize_path, vids[i], j, clip[j])
    

def main():
    """Parse the command-line arguments and run the visualization."""
    parsed_args = Parser("Visualization").parse_args()
    visualize(parsed_args)


if __name__ == "__main__":
    main()