import torch
from torch import Tensor
from typing import List, Tuple
from torch import nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.backbones.csp_darknet import CSPDarknet
from mmdet.models.necks.yolox_pafpn import YOLOXPAFPN
from mmdet.models.dense_heads.yolox_head import YOLOXHead
from mmyolo.models.backbones.csp_darknet import SPPFBottleneck
from mmengine.structures import InstanceData
from mmdet.models.layers import CSPLayer
from mmengine.config import ConfigDict
import math
import torch.nn.functional as F
import os.path as osp
import sys
# Make the project importable when this file is run as a script.
# NOTE(review): base_dir is computed but never used here; only the
# relative "./laketicv" is appended to sys.path — confirm intended.
base_dir = osp.dirname(osp.dirname(osp.dirname(osp.dirname(osp.abspath(__file__)))))
sys.path.append("./laketicv")

from torch.utils.data import Dataset, DataLoader
import cv2
import numpy as np
import time


from mmcv.transforms.processing import Resize, Pad, Compose
from mmcv.transforms import LoadImageFromFile
from mmdet.datasets.transforms.loading import LoadImageFromNDArray


# DataLoader settings shared by det_infer and det_infer_pmi.
batch_size = 4
num_workers = 6

class FocusDeploy(nn.Module):
    """Deployment-friendly stand-in for the YOLOX ``Focus`` stem.

    Rather than the space-to-depth slicing of the original Focus module,
    a single convolution with a doubled stride performs the same 2x
    spatial downsampling, which exports more cleanly to inference
    backends.

    Args:
        in_channels: Number of input channels.
        out_channels: Number of output channels.
        kernel_size: Convolution kernel size. Defaults to 1.
        stride: Base stride; the actual conv stride is ``stride * 2``.
        conv_cfg: Conv layer config forwarded to ``ConvModule``.
        norm_cfg: Norm layer config forwarded to ``ConvModule``.
        act_cfg: Activation config forwarded to ``ConvModule``.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=1,
                 stride=1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='LeakyReLU')):
        super().__init__()
        # Doubled stride reproduces the downsampling the slicing-based
        # Focus module would have done before its conv.
        self.conv = ConvModule(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride * 2,
            padding=(kernel_size - 1) // 2,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

    def forward(self, x):
        """Apply the strided conv-norm-act stem to ``x``."""
        return self.conv(x)


class CSPDarknetSPPFDeploy(CSPDarknet):
    """CSPDarknet variant rebuilt for deployment.

    Differences from the mmdet ``CSPDarknet`` parent:

    - the stem is ``FocusDeploy`` (a plain strided conv) instead of the
      slicing-based Focus module;
    - the per-stage SPP block is mmyolo's ``SPPFBottleneck``.

    The parent ``__init__`` builds its own stem and stages first; this
    class then re-registers modules under the SAME attribute names
    (``stem``, ``stage1`` ... ``stageN``), replacing the parent-built
    ones while keeping checkpoint state-dict keys compatible.  The
    registration order below is therefore load-bearing — do not reorder.
    """

    # Per-arch stage settings, one entry per stage:
    # [in_channels, out_channels, num_blocks, add_identity, use_spp]
    arch_settings = {
        'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False],
               [256, 512, 9, True, False], [512, 1024, 3, False, True]],
        'P6': [[64, 128, 3, True, False], [128, 256, 9, True, False],
               [256, 512, 9, True, False], [512, 768, 3, True, False],
               [768, 1024, 3, False, True]]
    }
    def __init__(self,
                 arch='P5',
                 deepen_factor=1.0,
                 widen_factor=1.0,
                 out_indices=(2, 3, 4),
                 frozen_stages=-1,
                 use_depthwise=False,
                 arch_ovewrite=None,
                 spp_kernal_sizes=(3, 5, 7),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='LeakyReLU'),
                 norm_eval=False,
                 init_cfg=dict(
                     type='Kaiming',
                     layer='Conv2d',
                     a=math.sqrt(5),
                     distribution='uniform',
                     mode='fan_in',
                     nonlinearity='leaky_relu')):
        # Normalize spp_kernal_sizes to a 3-tuple/list for the parent,
        # which expects a sequence of pooling kernel sizes.
        if type(spp_kernal_sizes) is int:
            spp_kernal_sizes_init = ([spp_kernal_sizes for _ in range(0, 3)])
        elif type(spp_kernal_sizes) in [tuple, list]:
            spp_kernal_sizes_init = spp_kernal_sizes
        else:
            raise ValueError('spp_kernal_size must be `int`, `tuple` or `list`! Please Check!')
        # NOTE(review): spp_kernal_sizes_init only reaches the parent's
        # SPP block, which is overwritten below; the SPPFBottleneck
        # created here uses its own default kernel sizes — confirm
        # this is intended.
        super().__init__(arch, deepen_factor, widen_factor, out_indices, frozen_stages,
                         use_depthwise, arch_ovewrite, spp_kernal_sizes_init, conv_cfg,
                         norm_cfg, act_cfg, norm_eval, init_cfg)
        arch_setting = self.arch_settings[arch]
        if arch_ovewrite:
            arch_setting = arch_ovewrite
        # Valid indices: 0 is the stem, 1..len(arch_setting) are stages.
        assert set(out_indices).issubset(
            i for i in range(len(arch_setting) + 1))
        if frozen_stages not in range(-1, len(arch_setting) + 1):
            raise ValueError('frozen_stages must be in range(-1, '
                             'len(arch_setting) + 1). But received '
                             f'{frozen_stages}')

        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.use_depthwise = use_depthwise
        self.norm_eval = norm_eval
        conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule

        # Replace the parent's slicing-based Focus stem with the
        # deploy-friendly strided-conv stem.
        self.stem = FocusDeploy(
            3,
            int(arch_setting[0][0] * widen_factor),
            kernel_size=3,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.layers = ['stem']

        # Rebuild each stage: strided conv -> (optional SPPF) -> CSP layer.
        for i, (in_channels, out_channels, num_blocks, add_identity,
                use_spp) in enumerate(arch_setting):
            in_channels = int(in_channels * widen_factor)
            out_channels = int(out_channels * widen_factor)
            # Scale block count by depth multiplier, but never below 1.
            num_blocks = max(round(num_blocks * deepen_factor), 1)
            stage = []
            conv_layer = conv(
                in_channels,
                out_channels,
                3,
                stride=2,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
            stage.append(conv_layer)
            if use_spp:
                # mmyolo's SPPF block (default kernel sizes; see NOTE above).
                spp = SPPFBottleneck(
                    out_channels,
                    out_channels,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg)
                stage.append(spp)
            csp_layer = CSPLayer(
                out_channels,
                out_channels,
                num_blocks=num_blocks,
                add_identity=add_identity,
                use_depthwise=use_depthwise,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
            stage.append(csp_layer)
            # Same attribute names as the parent so checkpoints load.
            self.add_module(f'stage{i + 1}', nn.Sequential(*stage))
            self.layers.append(f'stage{i + 1}')

from mmdet.structures import DetDataSample
from typing import List

# Alias matching mmdet's convention for a batch of data samples.
SampleList = List[DetDataSample]

class Model(nn.Module):
    """YOLOX-style detector (1024px pipeline) assembled for inference.

    Backbone: ``CSPDarknetSPPFDeploy`` (deploy-friendly stem/SPPF);
    Neck: ``YOLOXPAFPN``; Head: ``YOLOXHead`` with 6 classes.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        self.backbone = CSPDarknetSPPFDeploy(
            spp_kernal_sizes=3,
        )
        self.neck = YOLOXPAFPN(
            in_channels=[256, 512, 1024],
            out_channels=256,
            num_csp_blocks=3,
            act_cfg=dict(type='LeakyReLU'),
        )
        self.bbox_head = YOLOXHead(
            num_classes=6,
            in_channels=256,
            act_cfg=dict(type='LeakyReLU'),
        )

    @torch.no_grad()
    def forward(self, inputs: Tensor, scale_factor, img_path):
        """Run detection and return one result object per image.

        Args:
            inputs: Batched images, shape (N, 3, H, W).
            scale_factor: Per-image resize factors (from the Resize
                transform) used to rescale boxes back to original
                image coordinates.
            img_path: Per-image source paths; attached to each result.

        Returns:
            List of per-image results exposing ``bboxes``, ``scores``,
            ``labels`` and an extra ``img_path`` field.
        """
        num_imgs = inputs.size(0)

        x = self.backbone(inputs)
        x: Tuple[Tensor] = self.neck(x)

        # Head outputs: (cls_scores, bbox_preds, objectnesses), one
        # tensor per FPN level.  (The previous unused unpacking of these
        # three was removed — dead code.)
        head_outs = self.bbox_head(x)

        batch_img_metas = [
            dict(scale_factor=scale_factor[i], img_path=img_path[i])
            for i in range(num_imgs)
        ]

        results = self.bbox_head.predict_by_feat(
            *head_outs,
            batch_img_metas=batch_img_metas,
            rescale=True,
            cfg=ConfigDict(dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65))),
        )
        # Tag each result with its source path for downstream bookkeeping.
        for i, r in enumerate(results):
            r.set_field(img_path[i], 'img_path')

        return results


class CustomImageDataset(Dataset):
    """Dataset yielding (CHW float tensor, scale_factor, img_path) triples.

    Each image is loaded from disk, keep-ratio resized toward 1024x1024,
    then padded to a square with the YOLO-style grey value 114.
    """

    def __init__(self, image_paths):
        self.image_paths = image_paths
        self.transformer = Compose([
            LoadImageFromFile( file_client_args=dict(backend='disk')),
            Resize(scale=(1024, 1024), keep_ratio=True), 
            Pad(pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))),
        ])

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        # Run the load/resize/pad pipeline on a minimal sample dict.
        sample = self.transformer(dict(img_path=self.image_paths[idx], img_id=0))
        # HWC uint8 -> contiguous CHW float tensor.
        chw = np.ascontiguousarray(sample['img'].transpose(2, 0, 1))
        tensor = torch.from_numpy(chw).float()
        return tensor, sample['scale_factor'], sample['img_path']

def collect(batch):
    """Collate function: stack image tensors, pass metadata through.

    Args:
        batch: Sequence of (tensor, scale_factor, img_path) triples.

    Returns:
        ``(stacked_images, (scale_factors, img_paths))`` where the
        metadata entries are tuples aligned with the batch order.
    """
    images = torch.stack([sample[0] for sample in batch])
    scale_factors = tuple(sample[1] for sample in batch)
    paths = tuple(sample[2] for sample in batch)
    return images, (scale_factors, paths)


def get_det_model(checkpoint_path):
    """Build the 1024px detector, load weights, return it in eval mode.

    Args:
        checkpoint_path: Path to a checkpoint file containing a
            ``state_dict`` entry (mmengine-style).

    Returns:
        The loaded ``Model`` on ``cuda:0`` in eval mode.
    """
    device = torch.device("cuda:0")
    model = Model().eval().to(device)
    # map_location='cpu' avoids failing (or allocating) on whatever GPU
    # the checkpoint was saved from; load_state_dict copies the values
    # into the already-placed model parameters.
    weight = torch.load(checkpoint_path, map_location='cpu')['state_dict']
    model.load_state_dict(weight)
    return model

def det_infer(model, img_dirs, pred_score_thr=0.3):
    """Run batched inference and collect detections above a score threshold.

    Args:
        model: Detector whose forward returns per-image result objects
            with ``bboxes``/``labels``/``scores``/``img_path`` fields.
        img_dirs: List of image file paths.
        pred_score_thr: Detections scoring below this are dropped.

    Returns:
        List of ``(img_path, [(label, score, [x1, y1, x2, y2]), ...])``
        entries, one per image.
    """
    results = []
    dataset = CustomImageDataset(img_dirs)
    dataloader = DataLoader(dataset, batch_size=batch_size,
                            num_workers=num_workers, collate_fn=collect)
    device = torch.device("cuda:0")

    for _batch_inputs, (scale_factor, img_path) in dataloader:
        _batch_inputs = _batch_inputs.float()
        res = model(_batch_inputs.to(device), scale_factor, img_path)
        for instance_data in res:
            # Pull each field off the GPU exactly once (previously
            # bboxes/labels were transferred twice).
            path = instance_data.get('img_path')
            bboxes = instance_data.get('bboxes').cpu().numpy()
            labels = instance_data.get('labels').cpu().numpy()
            scores = instance_data.get('scores').cpu().numpy()
            # Scores are assumed sorted descending (NMS output), so the
            # first sub-threshold index truncates everything below it.
            # TODO(review): confirm sorting; a boolean mask would be
            # order-independent but changes behavior on unsorted input.
            below = np.where(scores < pred_score_thr)[0]
            cut = below[0] if below.size > 0 else len(scores)
            detections = [
                (label, score, list(bbox))
                for label, score, bbox in zip(labels[:cut], scores[:cut], bboxes[:cut])
            ]
            results.append((path, detections))
    return results















class Model_PMI(nn.Module):
    """Smaller YOLOX-style detector (PMI variant, 640px pipeline).

    Same architecture family as ``Model`` but with depth/width
    multipliers 0.67/0.75 and correspondingly narrower neck/head.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        self.backbone = CSPDarknetSPPFDeploy(
            deepen_factor=0.67,
            widen_factor=0.75,
            out_indices=(2, 3, 4),
            use_depthwise=False,
            spp_kernal_sizes=3,
        )
        self.neck = YOLOXPAFPN(
            in_channels=[192, 384, 768],
            out_channels=192,
            num_csp_blocks=2,
            act_cfg=dict(type='LeakyReLU'),
        )
        self.bbox_head = YOLOXHead(
            num_classes=6,
            in_channels=192,
            feat_channels=192,
            act_cfg=dict(type='LeakyReLU'),
        )

    @torch.no_grad()
    def forward(self, inputs: Tensor, scale_factor, img_path):
        """Run detection and return one result object per image.

        Args:
            inputs: Batched images, shape (N, 3, H, W).
            scale_factor: Per-image resize factors used to rescale boxes
                back to original image coordinates.
            img_path: Per-image source paths; attached to each result.

        Returns:
            List of per-image results exposing ``bboxes``, ``scores``,
            ``labels`` and an extra ``img_path`` field.
        """
        num_imgs = inputs.size(0)

        x = self.backbone(inputs)
        x: Tuple[Tensor] = self.neck(x)

        # Head outputs: (cls_scores, bbox_preds, objectnesses), one
        # tensor per FPN level.  (The previous unused unpacking of these
        # three was removed — dead code.)
        head_outs = self.bbox_head(x)

        batch_img_metas = [
            dict(scale_factor=scale_factor[i], img_path=img_path[i])
            for i in range(num_imgs)
        ]

        results = self.bbox_head.predict_by_feat(
            *head_outs,
            batch_img_metas=batch_img_metas,
            rescale=True,
            cfg=ConfigDict(dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65))),
        )
        # Tag each result with its source path for downstream bookkeeping.
        for i, r in enumerate(results):
            r.set_field(img_path[i], 'img_path')

        return results


class CustomImageDataset_pmi(Dataset):
    """Dataset yielding (CHW float tensor, scale_factor, img_path) triples.

    PMI variant of ``CustomImageDataset``: keep-ratio resize toward
    640x640 instead of 1024x1024, then pad square with grey value 114.
    """

    def __init__(self, image_paths):
        self.image_paths = image_paths
        self.transformer = Compose([
            LoadImageFromFile( file_client_args=dict(backend='disk')),
            Resize(scale=(640, 640), keep_ratio=True), 
            Pad(pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))),
        ])

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        # Run the load/resize/pad pipeline on a minimal sample dict.
        sample = self.transformer(dict(img_path=self.image_paths[idx], img_id=0))
        # HWC uint8 -> contiguous CHW float tensor.
        chw = np.ascontiguousarray(sample['img'].transpose(2, 0, 1))
        tensor = torch.from_numpy(chw).float()
        return tensor, sample['scale_factor'], sample['img_path']



def get_det_model_pmi(checkpoint_path):
    """Build the PMI (640px) detector, load weights, return it in eval mode.

    Args:
        checkpoint_path: Path to a checkpoint file containing a
            ``state_dict`` entry (mmengine-style).

    Returns:
        The loaded ``Model_PMI`` on ``cuda:0`` in eval mode.
    """
    device = torch.device("cuda:0")
    model = Model_PMI().eval().to(device)
    # map_location='cpu' avoids failing (or allocating) on whatever GPU
    # the checkpoint was saved from; load_state_dict copies the values
    # into the already-placed model parameters.
    weight = torch.load(checkpoint_path, map_location='cpu')['state_dict']
    model.load_state_dict(weight)
    return model

def det_infer_pmi(model, img_dirs, pred_score_thr=0.3):
    """Run batched PMI-model inference and collect thresholded detections.

    Args:
        model: Detector whose forward returns per-image result objects
            with ``bboxes``/``labels``/``scores``/``img_path`` fields.
        img_dirs: List of image file paths.
        pred_score_thr: Detections scoring below this are dropped.

    Returns:
        List of ``(img_path, [(label, score, [x1, y1, x2, y2]), ...])``
        entries, one per image.
    """
    results = []
    dataset = CustomImageDataset_pmi(img_dirs)
    dataloader = DataLoader(dataset, batch_size=batch_size,
                            num_workers=num_workers, collate_fn=collect)
    device = torch.device("cuda:0")

    for _batch_inputs, (scale_factor, img_path) in dataloader:
        _batch_inputs = _batch_inputs.float()
        res = model(_batch_inputs.to(device), scale_factor, img_path)
        for instance_data in res:
            # Pull each field off the GPU exactly once (previously
            # bboxes/labels were transferred twice).
            path = instance_data.get('img_path')
            bboxes = instance_data.get('bboxes').cpu().numpy()
            labels = instance_data.get('labels').cpu().numpy()
            scores = instance_data.get('scores').cpu().numpy()
            # Scores are assumed sorted descending (NMS output), so the
            # first sub-threshold index truncates everything below it.
            # TODO(review): confirm sorting; a boolean mask would be
            # order-independent but changes behavior on unsorted input.
            below = np.where(scores < pred_score_thr)[0]
            cut = below[0] if below.size > 0 else len(scores)
            detections = [
                (label, score, list(bbox))
                for label, score, bbox in zip(labels[:cut], scores[:cut], bboxes[:cut])
            ]
            results.append((path, detections))
    return results