import os
import torch
from torch import nn
from mmseg.apis import inference_model as seg_inference_model
from mmseg.apis import init_model as seg_init_model
import mmcv
import numpy as np
import torch.nn.functional as F
from mmseg.models.necks import FPN
from mmseg.models.decode_heads import FPNHead
from mmcv.cnn import ConvModule
from mmseg.registry import MODELS
import cv2

from mmcv.transforms.processing import Resize, Pad, Compose
from mmcv.transforms import LoadImageFromFile
from mmdet.datasets.transforms.loading import LoadImageFromNDArray

from torch.utils.data import DataLoader, Dataset
import time
from mmseg.structures import SegDataSample
from tqdm import tqdm
from mmseg.apis import init_model, inference_model


# DataLoader settings shared by seg_infer() below.
batch_size=4
num_workers=4

class UpsampleX(nn.Module):
    """Thin ``nn.Module`` wrapper around ``F.interpolate``.

    Usable inside ``nn.Sequential`` (e.g. the scale heads built by
    ``FPNHeadX``) while keeping the interpolation arguments as plain
    attributes.

    Args:
        size: Explicit output size; mutually exclusive with ``scale_factor``
            (pass ``scale_factor=None`` when using ``size``).
        scale_factor: Spatial multiplier (default 2.0).
        mode: Interpolation mode forwarded to ``F.interpolate``.
        align_corners: Forwarded to ``F.interpolate``; only meaningful for
            linear/bilinear/bicubic/trilinear modes.
    """

    def __init__(self,
                 size=None,
                 scale_factor=2.0,
                 mode='nearest',
                 align_corners=None):
        super().__init__()  # zero-arg super — Python 3 idiom
        self.size = size
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        # Pure delegation; the attributes act as stored configuration.
        return F.interpolate(x, size=self.size, scale_factor=self.scale_factor,
                             mode=self.mode, align_corners=self.align_corners)

@MODELS.register_module()
class FPNHeadX(FPNHead):
    """FPN decode head whose 2x upsample stages use :class:`UpsampleX`.

    After the parent constructor runs, ``self.scale_heads`` is rebuilt so
    that every upsampling step inside a scale head is an ``UpsampleX``
    module (nearest, scale_factor=2.0) instead of the stock upsample.
    """

    def __init__(self, feature_strides, **kwargs):
        super().__init__(feature_strides, **kwargs)
        assert len(feature_strides) == len(self.in_channels)
        assert min(feature_strides) == feature_strides[0]
        self.feature_strides = feature_strides

        base_stride = feature_strides[0]
        self.scale_heads = nn.ModuleList()
        for idx, stride in enumerate(feature_strides):
            # Conv(+upsample) stages needed to bring this level to base_stride.
            n_stages = max(1, int(np.log2(stride) - np.log2(base_stride)))
            stages = []
            for stage in range(n_stages):
                in_ch = self.in_channels[idx] if stage == 0 else self.channels
                stages.append(
                    ConvModule(
                        in_ch,
                        self.channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
                if stride != base_stride:
                    stages.append(UpsampleX(scale_factor=2.0))
            self.scale_heads.append(nn.Sequential(*stages))

    def forward(self, inputs):
        feats = self._transform_inputs(inputs)
        # Fuse all pyramid levels at the finest stride by summation.
        fused = self.scale_heads[0](feats[0])
        for idx in range(1, len(self.feature_strides)):
            fused = fused + self.scale_heads[idx](feats[idx])
        return self.cls_seg(fused)

@MODELS.register_module()
class FPNX(FPN):
    """FPN whose top-down pathway uses a fixed 2x nearest-neighbor upsample."""

    def forward(self, inputs):
        assert len(inputs) == len(self.in_channels)

        # Lateral 1x1 convs on the selected backbone levels.
        lats = [
            conv(inputs[idx + self.start_level])
            for idx, conv in enumerate(self.lateral_convs)
        ]

        # Top-down path: upsample each level 2x and add into the level below.
        n_levels = len(lats)
        for lvl in range(n_levels - 1, 0, -1):
            up = F.interpolate(lats[lvl], size=None, scale_factor=2.0,
                               mode='nearest', align_corners=None)
            lats[lvl - 1] = lats[lvl - 1] + up

        # 3x3 output convs on every fused level.
        outs = [self.fpn_convs[lvl](lats[lvl]) for lvl in range(n_levels)]

        # Optionally append extra pyramid levels on top.
        if self.num_outs > len(outs):
            if not self.add_extra_convs:
                # Extra levels via stride-2 max pooling (Faster/Mask R-CNN).
                for _ in range(self.num_outs - n_levels):
                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))
            else:
                # Extra levels via convs (RetinaNet style); pick the source.
                if self.add_extra_convs == 'on_input':
                    src = inputs[self.backbone_end_level - 1]
                elif self.add_extra_convs == 'on_lateral':
                    src = lats[-1]
                elif self.add_extra_convs == 'on_output':
                    src = outs[-1]
                else:
                    raise NotImplementedError
                outs.append(self.fpn_convs[n_levels](src))
                for lvl in range(n_levels + 1, self.num_outs):
                    feat = F.relu(outs[-1]) if self.relu_before_extra_convs \
                        else outs[-1]
                    outs.append(self.fpn_convs[lvl](feat))
        return tuple(outs)


class CustomImageDataset(Dataset):
    """Dataset yielding normalized image tensors for segmentation inference.

    Each item is loaded from disk, resized to 256x256 (aspect ratio NOT
    kept), channel-flipped to RGB, and normalized with the ImageNet
    mean/std on the 0-255 scale.
    """

    # ImageNet RGB mean/std (0-255 scale), shaped (C, 1, 1) for broadcasting.
    # Built once at class level instead of re-allocating per item.
    _MEAN = torch.tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)
    _STD = torch.tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)

    def __init__(self, image_paths):
        self.image_paths = image_paths
        self.transformer = Compose([
            LoadImageFromFile(),
            Resize(scale=(256, 256), keep_ratio=False),
        ])

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        """Return ``(img_tensor, ori_shape, img_path)`` for sample ``idx``."""
        image_path = self.image_paths[idx]
        results = self.transformer(dict(img_path=image_path, img_id=0))

        data = results['img']  # HWC array; presumably BGR (mmcv default) — verify
        ori_shape = results['ori_shape']
        img_path = results['img_path']

        # HWC -> CHW, then flip channels (BGR -> RGB) before normalization
        # so the ordering matches the RGB mean/std constants above.
        img = torch.from_numpy(np.ascontiguousarray(data.transpose(2, 0, 1)))
        img = img[[2, 1, 0], ...].float()
        img = (img - self._MEAN) / self._STD

        return img, ori_shape, img_path

def collect(batch):
    """Collate ``(img, ori_shape, img_path)`` samples into one batch.

    Returns ``(imgs, (ori_shapes, img_paths))`` where ``imgs`` is the
    stacked image tensor and the shapes/paths remain tuples.
    """
    images = [sample[0] for sample in batch]
    shapes = tuple(sample[1] for sample in batch)
    paths = tuple(sample[2] for sample in batch)
    return torch.stack(images), (shapes, paths)


from mmseg.models.backbones import ResNetV1c
from mmseg.models.utils import resize

class Model(nn.Module):
    """ResNet18-V1c + FPN segmentation network (2 classes).

    Attribute names (``backbone``/``neck``/``decode_head``) mirror mmseg's
    EncoderDecoder layout so a checkpoint trained there loads via
    ``load_state_dict`` without key remapping — do not rename them.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # ResNet-18 (V1c deep stem), emitting feature maps from all 4 stages.
        self.backbone = ResNetV1c(
            depth=18,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            dilations=(1, 1, 1, 1),
            strides=(1, 2, 2, 2),
            norm_cfg=dict(type='BN', requires_grad=True),
            norm_eval=False,
        )
        # Custom FPN (fixed 2x nearest upsample in the top-down path).
        self.neck = FPNX(
            in_channels=[64, 128, 256, 512],
            out_channels=128,
            num_outs=4
        )
        # Decode head producing 2-class logits at the finest stride (4).
        self.decode_head = FPNHeadX(
            in_channels=[128, 128, 128, 128],
            in_index=[0, 1, 2, 3],
            feature_strides=[4, 8, 16, 32],
            channels=64,  # intermediate channel width inside the head
            dropout_ratio=0.1,
            num_classes=2,
            norm_cfg=dict(type='BN', requires_grad=True),
            align_corners=False,
        )

    def forward(self, x, ori_size, img_paths):
        """Run inference on a normalized batch and return per-image results.

        Args:
            x: Batch tensor; assumed already resized to 256x256 and
                normalized (see CustomImageDataset) — TODO confirm.
            ori_size: Per-sample original (H, W) to resize logits back to.
            img_paths: Per-sample source paths, passed through to results.

        Returns:
            List of dicts (one per sample) with keys ``'seg_logits'``,
            ``'pred_sem_seg'`` (argmax class indices, shape (1, H, W)) and
            ``'img_path'``.
        """
        n = x.size(0)
        assert n == len(ori_size)
        x = self.backbone(x)
        x = self.neck(x)
        # Minimal img_metas: only img_shape is supplied to predict() here.
        batch_img_metas = [dict(img_shape=(256, 256)) for _ in range(n)]
        seg_logits = self.decode_head.predict(x, batch_img_metas, None)

        batch_size, C, H, W = seg_logits.shape
        # Padding is always zero in this pipeline; kept for parity with
        # mmseg-style postprocessing — the crops below are no-ops.
        padding_size = [0] * 4

        padding_left, padding_right, padding_top, padding_bottom = padding_size

        res = [None for _ in range(batch_size)]
        for i in range(batch_size):
            # Crop away padding (none in practice), then resize logits
            # back to the sample's original resolution.
            i_seg_logits = seg_logits[i:i + 1, :, padding_top:H - padding_bottom, padding_left:W - padding_right]
            i_seg_logits = resize(
                    i_seg_logits,
                    size=ori_size[i],
                    mode='bilinear',
                    align_corners=False,
                    warning=False).squeeze(0)

            # Per-pixel class index; keepdim preserves a leading channel dim.
            i_seg_pred = i_seg_logits.argmax(dim=0, keepdim=True)
            img_path = img_paths[i]

            res[i] = {
                'seg_logits': i_seg_logits,
                'pred_sem_seg': i_seg_pred,
                'img_path': img_path
            }

        return res


def get_seg_model(checkpoint_path):
    """Build the segmentation model and load weights from a checkpoint.

    Args:
        checkpoint_path: Path to a checkpoint file containing a
            ``'state_dict'`` entry (mmengine-style checkpoint — verify).

    Returns:
        The ``Model`` in eval mode on ``cuda:0``.
    """
    device = torch.device("cuda:0")
    model = Model().eval().to(device)
    # map_location='cpu' makes loading independent of the device the
    # checkpoint was saved on; weights are moved to `device` by the
    # already-placed model during load_state_dict.
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    model.load_state_dict(checkpoint['state_dict'])
    return model

def seg_infer(model, img_dirs):
    """Run batched segmentation inference over a list of image paths.

    Args:
        model: A ``Model`` already on ``cuda:0`` and in eval mode
            (as returned by ``get_seg_model``).
        img_dirs: List of image file paths.

    Returns:
        List of ``(img_path, mask)`` tuples; ``mask`` is a 2-D numpy array
        of per-pixel class indices at the image's original resolution.
    """
    results = []
    dataset = CustomImageDataset(img_dirs)
    dataloader = DataLoader(dataset, batch_size=batch_size,
                            num_workers=num_workers, collate_fn=collect)

    device = torch.device("cuda:0")

    # Inference only: no_grad avoids building autograd graphs and keeps
    # GPU memory flat across batches.
    with torch.no_grad():
        for batch_imgs, (ori_sizes, batch_paths) in tqdm(dataloader):
            preds = model(batch_imgs.float().to(device), ori_sizes, batch_paths)
            for pred in preds:
                path = pred.get('img_path')
                mask = np.squeeze(pred.get('pred_sem_seg').cpu().numpy())
                results.append((path, mask))
    return results