import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import *
from mmdet3d.models import build_backbone, build_neck
from .polarlane_head import PolarLaneHead
from mmcv.utils import Config
from .ms2one import build_ms2one
from .utils import deepFeatureExtractor_EfficientNet

from mmdet.models.builder import BACKBONES


# overall network
# overall network
class PolarLane(nn.Module):
    """Query-based 3D lane detector.

    Pipeline: backbone encoder -> optional FPN neck -> multi-scale fusion
    (``ms2one``) -> ``PolarLaneHead`` (2D query-based instance segmentation
    plus lane regression).

    Note (review): the original ``forward`` ran a Grad-CAM visualization on a
    hard-coded image and wrote JPEGs to disk on every call; that debug code now
    lives in the opt-in :meth:`visualize_gradcam` helper.
    """

    def __init__(self, args):
        """Build the network from the experiment config.

        Args:
            args: experiment namespace/config; must provide ``polarlane_cfg``
                (with ``fpn_dim``, ``num_query``, ``num_group``,
                ``sparse_num_group``, ``encoder``, optional ``neck``),
                ``ms2one``, ``transformer``, ``sparse_ins_decoder`` and the
                geometry/label settings read below.
        """
        super().__init__()
        self.no_cuda = args.no_cuda
        self.batch_size = args.batch_size
        self.num_lane_type = 1  # no centerline
        self.num_y_steps = args.num_y_steps
        self.max_lanes = args.max_lanes
        self.num_category = args.num_category
        _dim_ = args.polarlane_cfg.fpn_dim
        num_query = args.polarlane_cfg.num_query
        num_group = args.polarlane_cfg.num_group
        sparse_num_group = args.polarlane_cfg.sparse_num_group

        self.encoder = build_backbone(args.polarlane_cfg.encoder)  # backbone
        # The neck is optional; forward() must tolerate self.neck being None.
        if getattr(args.polarlane_cfg, 'neck', None):
            self.neck = build_neck(args.polarlane_cfg.neck)  # FPN
        else:
            self.neck = None
        self.encoder.init_weights()
        self.ms2one = build_ms2one(args.ms2one)  # DilateNaive

        # build 2d query-based instance seg
        self.head = PolarLaneHead(
            args=args,
            dim=_dim_,
            num_group=num_group,
            num_convs=4,
            in_channels=_dim_,
            kernel_dim=_dim_,
            position_range=args.position_range,
            top_view_region=args.top_view_region,
            positional_encoding=dict(
                type='SinePositionalEncoding',
                num_feats=_dim_ // 2, normalize=True),
            num_query=num_query,
            pred_dim=self.num_y_steps,
            num_classes=args.num_category,
            embed_dims=_dim_,
            transformer=args.transformer,
            sparse_ins_decoder=args.sparse_ins_decoder,
            **args.polarlane_cfg.get('head', {}),
            trans_params=args.polarlane_cfg.get('trans_params', {})
        )

    def forward(self, image, _M_inv=None, is_training=True, extra_dict=None):
        """Run the detection pipeline on a batch of images.

        Args:
            image: input image batch tensor fed to the backbone.
            _M_inv: unused here; kept for caller compatibility.
            is_training: when False, ground-truth lane entries passed to the
                head are replaced with ``None``.
            extra_dict: dict with keys ``seg_idx_label``, ``seg_label``,
                ``lidar2img``, ``pad_shape`` and, during training,
                ``ground_lanes`` / ``ground_lanes_dense``.

        Returns:
            Whatever ``PolarLaneHead.__call__`` returns (predictions, and
            losses during training).
        """
        out_featList = self.encoder(image)
        # The neck is optional (see __init__); pass the backbone features
        # through unchanged when it is absent instead of calling None.
        if self.neck is not None:
            neck_out = self.neck(out_featList)
        else:
            neck_out = out_featList
        neck_out = self.ms2one(neck_out)

        output = self.head(
            dict(
                x=neck_out,
                lane_idx=extra_dict['seg_idx_label'],
                seg=extra_dict['seg_label'],
                lidar2img=extra_dict['lidar2img'],
                pad_shape=extra_dict['pad_shape'],
                ground_lanes=extra_dict['ground_lanes'] if is_training else None,
                ground_lanes_dense=extra_dict['ground_lanes_dense'] if is_training else None,
                image=image,
            ),
            is_training=is_training,
        )
        return output

    def visualize_gradcam(self, image_path='./image/1.jpg',
                          output_dir='./features'):
        """Debug utility: dump Grad-CAM / guided-backprop maps for the encoder.

        Extracted from ``forward`` (where it ran on every call with hard-coded
        paths). Call explicitly when inspecting the backbone. Requires the
        third-party ``pytorch_grad_cam`` package and a CUDA device.

        Args:
            image_path: image to visualize (BGR file readable by ``cv2``).
            output_dir: directory where the three JPEG maps are written.

        Raises:
            FileNotFoundError: if ``image_path`` cannot be read.
        """
        # Lazy third-party imports: only needed when this debug path is used.
        from pytorch_grad_cam import GradCAM, GuidedBackpropReLUModel
        from pytorch_grad_cam.utils.image import (
            show_cam_on_image, deprocess_image, preprocess_image
        )

        model = self.encoder.to('cuda')
        # NOTE(review): assumes a ResNet-style backbone exposing `.layer4` —
        # confirm against the configured encoder.
        target_layers = [model.layer4]

        # cv2 / np / os are expected to come from `from utils.utils import *`.
        bgr = cv2.imread(image_path, 1)
        if bgr is None:
            # cv2.imread returns None on failure; fail loudly instead of
            # crashing later on a None slice.
            raise FileNotFoundError(f'cannot read image: {image_path}')
        rgb_img = np.float32(bgr[:, :, ::-1]) / 255
        input_tensor = preprocess_image(rgb_img,
                                        mean=[0.485, 0.456, 0.406],
                                        std=[0.229, 0.224, 0.225]).to('cuda')

        with GradCAM(model=model, target_layers=target_layers) as cam:
            cam.batch_size = 1
            grayscale_cam = cam(input_tensor=input_tensor,
                                targets=None,
                                aug_smooth=False,
                                eigen_smooth=False)[0, :]
            cam_image = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)
            cam_image = cv2.cvtColor(cam_image, cv2.COLOR_RGB2BGR)

        gb_model = GuidedBackpropReLUModel(model=model, device='cuda')
        gb = gb_model(input_tensor, target_category=None)

        cam_mask = cv2.merge([grayscale_cam, grayscale_cam, grayscale_cam])
        cam_gb = deprocess_image(cam_mask * gb)
        gb = deprocess_image(gb)

        os.makedirs(output_dir, exist_ok=True)
        cv2.imwrite(os.path.join(output_dir, 'GradCAM_cam.jpg'), cam_image)
        cv2.imwrite(os.path.join(output_dir, 'GradCAM_gb.jpg'), gb)
        cv2.imwrite(os.path.join(output_dir, 'GradCAM_cam_gb.jpg'), cam_gb)