import torch

# from torch import nn
from typing import overload
from monai.inferers import SlidingWindowSplitter, Merger, AvgMerger

import data.transforms as transforms


class SlidingWindowInferer(torch.nn.Module):
    """Run a predictor over sliding-window patches of ``batch["images"]``.

    Patch extraction is delegated to MONAI's ``SlidingWindowSplitter``;
    merging of the per-patch predictions is left to subclasses (see
    ``MultiMAESlidingWindowInferer``), so this base ``forward`` simply
    returns the raw per-window predictions.
    """

    def __init__(
        self,
        patch_size,
        overlap=0.0,
        offset=0,
        filter_fn=None,
        pad_mode="constant",
        pad_value=0,
        device=None,
        *args,
        **kwargs
    ):
        """Build the window splitter.

        All parameters are forwarded to ``SlidingWindowSplitter``; see the
        MONAI documentation for their semantics. ``device`` controls where
        the split patches are placed.
        """
        super().__init__()
        self.splitter = SlidingWindowSplitter(
            patch_size,
            overlap,
            offset,
            filter_fn=filter_fn,
            pad_mode=pad_mode,
            pad_value=pad_value,
            device=device,
        )

    def to(self, device):
        """Move the module to ``device`` and retarget the splitter's output.

        BUG FIX: the original override returned ``None`` and never called
        ``super().to``, breaking the standard ``m = m.to(dev)`` chaining
        contract of ``nn.Module.to`` and skipping the move of any
        registered parameters/buffers.
        """
        self.splitter.device = device
        return super().to(device)

    def forward(self, batch, predictor):
        """Apply ``predictor`` to every sliding window of ``batch["images"]``.

        Returns:
            List of ``(prediction, location)`` pairs, one per window.
            (The original discarded each prediction and returned ``None``;
            returning the collected pairs is backward compatible and makes
            the base class usable on its own.)
        """
        return [(predictor(x), loc) for x, loc in self.splitter(batch["images"])]


def split_seg_from_imgs(data, seg_type, seg_channels):
    """Split the trailing segmentation channels off the stacked image tensor.

    Moves the last ``seg_channels`` channels of ``data["images"]`` into
    ``data[seg_type]`` and keeps the remaining channels under
    ``data["images"]``. Mutates ``data`` in place and returns it.
    """
    stacked = data["images"]
    data[seg_type] = stacked[:, -seg_channels:]
    data["images"] = stacked[:, :-seg_channels]
    return data

class MultiMAESlidingWindowInferer(SlidingWindowInferer):
    """Sliding-window inferer for a multi-task (MultiMAE-style) predictor.

    The batch may carry a single stacked ``"images"`` tensor or one tensor
    per input task; an optional segmentation tensor occupies the trailing
    channels. Each spatial window is run through ``predictor`` and the
    per-task reconstructions and selected patches are merged back into
    full-size volumes with MONAI ``AvgMerger``s.
    """

    def __init__(
        self,
        seg_type,
        input_tasks,
        output_tasks,
        patch_size,
        overlap=0.0,
        offset=0,
        filter_fn=None,
        pad_mode="constant",
        pad_value=0,
        return_patches: bool = False,
        device=None,
        seg_channels: int = 1,
        *args,
        **kwargs
    ):
        """Configure window splitting and the per-window input transforms.

        Args:
            seg_type: key of the segmentation task, or ``None`` if absent.
            input_tasks: task keys the predictor consumes.
            output_tasks: task keys the predictor reconstructs.
            patch_size: sliding-window size (int or per-dimension tuple).
            overlap, offset, filter_fn, pad_mode, pad_value: forwarded to
                ``SlidingWindowSplitter``.
            return_patches: if True, also return the raw per-window input
                dicts in the result.
            device: device for crop coordinates and the merger buffers.
            seg_channels: number of trailing channels occupied by the
                segmentation tensor.
        """
        super().__init__(
            patch_size=patch_size,
            overlap=overlap,
            offset=offset,
            filter_fn=filter_fn,
            pad_mode=pad_mode,
            pad_value=pad_value,
            device=device,
        )
        self.input_tasks = input_tasks
        self.output_tasks = output_tasks
        self.seg_type = seg_type
        self.patch_size = patch_size
        self.return_patches = return_patches
        self.device = device
        self.seg_channels = seg_channels

        # Moves the trailing seg channels of "images" into data[seg_type]
        # (wraps the module-level helper of the same name).
        self.split_seg_from_imgs = transforms.Compose([
            transforms.Lambda(
                func=lambda data: split_seg_from_imgs(data, seg_type, seg_channels)
            ),
        ])

        self.input_tasks_no_seg = [task for task in input_tasks if task != seg_type]

        # Splits the stacked "images" tensor back into one entry per input
        # task, renaming "images_<task>" -> "<task>".
        self.split_imgs = transforms.Compose(
            [
                transforms.SplitDimd(
                    keys="images", output_postfixes=self.input_tasks_no_seg, dim=1, update_meta=True
                ),
                transforms.DeleteItemsd(
                    keys=[
                        "images",
                    ]
                ),
                transforms.Lambda(
                    func=lambda data: {
                        k.replace("images_", ""): v for k, v in data.items()
                    }
                ),
            ]
        )

    def forward(self, batch, predictor):
        """Run ``predictor`` over sliding windows and merge the outputs.

        Args:
            batch: dict holding either a stacked ``"images"`` tensor or one
                tensor per input task, plus a ``*crop_start*`` entry with
                the global offset of this batch crop (assumes exactly one
                such key exists — TODO confirm against the data pipeline).
            predictor: model whose ``forward(split_batch,
                return_as_image=True, return_as_dict=True)`` returns dicts
                with per-task ``"reconstructed_patches"`` and
                ``"selected_patches"``.

        Returns:
            dict with merged ``"reconstructed_image"`` volumes, merged
            ``"selected_patches"`` volumes, the raw input ``"patches"``
            (empty unless ``return_patches``), and the per-window
            ``"reconstructed_patches"`` list.
        """
        # The splitter operates on one stacked tensor, so concatenate all
        # per-task inputs (and the segmentation, if present) channel-wise.
        if "images" in batch:
            img = batch["images"]
        else:
            img = torch.cat([batch[task] for task in self.input_tasks_no_seg], dim=1)
        img_channels = img.shape[1]

        if self.seg_type is not None and self.seg_type in self.input_tasks:
            seg = batch[self.seg_type]
            img = torch.cat([img, seg], dim=1)

        outputs = []
        locations = []
        split_patches = []
        padded_shape = self.splitter.get_padded_shape(img)
        # Merger buffer shapes: segmentation tasks carry seg_channels,
        # the stacked image the full input channel count.
        seg_cropped_shape = (img.shape[0], self.seg_channels) + img.shape[-3:]
        seg_merged_shape = (img.shape[0], self.seg_channels) + padded_shape
        cropped_shape = (img.shape[0], img_channels) + img.shape[-3:]
        merged_shape = (img.shape[0], img_channels) + padded_shape

        # Loop-invariant: hoisted out of the per-window loop (the original
        # recomputed this key scan for every window).
        crop_start_keys = [key for key in batch.keys() if 'crop_start' in key]

        for x, loc in self.splitter(img):
            if isinstance(self.patch_size, int):
                crop_end = tuple([d + self.patch_size for d in list(loc)])
            else:
                crop_end = tuple([d + p for d, p in zip(list(loc), self.patch_size)])
            # Absolute per-sample crop coordinates: window offset within the
            # padded volume plus the batch's global crop offset.
            images_crop_start = torch.tensor(loc)[None, :].repeat(img.shape[0], 1).float().to(self.device)
            images_crop_end = torch.tensor(crop_end)[None, :].repeat(img.shape[0], 1).float().to(self.device)
            images_crop_start += batch[crop_start_keys[0]]
            images_crop_end += batch[crop_start_keys[0]]

            split_batch = {
                "images": x,
                "crop_start": images_crop_start,
                "crop_end": images_crop_end,
            }
            if self.seg_type in self.input_tasks:
                split_batch = self.split_seg_from_imgs(split_batch)
            if "images" not in batch:
                split_batch = self.split_imgs(split_batch)

            outputs.append(
                predictor.forward(
                    split_batch, return_as_image=True, return_as_dict=True
                )
            )
            locations.append(loc)
            if self.return_patches:
                split_patches.append(split_batch)

        reconstructed_image = {}
        selected_patches = {}

        for task in self.input_tasks:
            sel_merger = AvgMerger(
                merged_shape=seg_merged_shape if task == self.seg_type else merged_shape,
                cropped_shape=seg_cropped_shape if task == self.seg_type else cropped_shape,
                device=self.device,
            )
            for location, output in zip(locations, outputs):
                sel_merger.aggregate(output["selected_patches"][task], location)
            # BUG FIX: AvgMerger only count-normalises its buffer in
            # finalize(); the original read get_values() *before* calling
            # finalize(), so overlap regions held un-averaged sums
            # (inconsistent with the reconstruction loop below).
            selected_patches[task] = sel_merger.finalize()

        for task in self.output_tasks:
            # Size the merger from the task's actual channel count: output
            # tasks need not share the stacked input channel count. (The
            # original computed this shape but then passed merged_shape,
            # which raises a shape mismatch whenever the counts differ.)
            task_channels = outputs[0]["reconstructed_patches"][task].shape[-4]
            if task == self.seg_type:
                task_merged_shape, task_cropped_shape = seg_merged_shape, seg_cropped_shape
            else:
                task_merged_shape = (img.shape[0], task_channels) + padded_shape
                task_cropped_shape = (img.shape[0], task_channels) + img.shape[-3:]
            recon_merger = AvgMerger(
                merged_shape=task_merged_shape,
                cropped_shape=task_cropped_shape,
                device=self.device,
            )
            for location, output in zip(locations, outputs):
                recon_merger.aggregate(output["reconstructed_patches"][task], location)
            # finalize() returns the count-normalised merged volume.
            reconstructed_image[task] = recon_merger.finalize()

        return {
            "reconstructed_image": reconstructed_image,
            "selected_patches": selected_patches,
            "patches": split_patches,
            "reconstructed_patches": [o['reconstructed_patches'] for o in outputs],
        }
