import torch
import torch.nn as nn
import torch.nn.functional as F
# from utils.utils import *
# from mmdet3d.registry import MODELS 
from mmdet.registry import MODELS as MODEL
# from mmdet3d.models import build_backbone, build_neck
from latr_head import LATRHead
from mmengine.config import Config
from ms2one import build_ms2one
from utils import deepFeatureExtractor_EfficientNet
from transformer import FFN, MultiheadAttention, build_positional_encoding
from registry import (POSITIONAL_ENCODING, ATTENTION, 
                      FEEDFORWARD_NETWORK, TRANSFORMER_LAYER, TRANSFORMER_LAYER_SEQUENCE)

# from mmdet.models.builder import BACKBONES
from data.Load_Data import LaneDataset
from torch.utils.data import DataLoader
import sys


# overall network
class LATR(nn.Module):
    """LATR 3D lane detection network.

    Pipeline: image -> encoder backbone -> (optional) FPN neck ->
    multi-scale-to-single-scale fusion (``ms2one``) -> query-based
    :class:`LATRHead` that predicts lane instances.

    Args:
        args: config object (mmengine ``Config``-like) providing at least
            ``no_cuda``, ``batch_size``, ``num_y_steps``, ``max_lanes``,
            ``num_category``, ``position_range``, ``top_view_region``,
            ``transformer``, ``sparse_ins_decoder``, ``ms2one`` and a
            ``latr_cfg`` sub-config (``fpn_dim``, ``num_query``,
            ``num_group``, ``encoder``, optional ``neck``/``head``/
            ``trans_params``).
    """

    def __init__(self, args):
        super().__init__()
        self.no_cuda = args.no_cuda
        self.batch_size = args.batch_size
        self.num_lane_type = 1  # no centerline
        self.num_y_steps = args.num_y_steps
        self.max_lanes = args.max_lanes
        self.num_category = args.num_category

        _dim_ = args.latr_cfg.fpn_dim
        num_query = args.latr_cfg.num_query
        num_group = args.latr_cfg.num_group

        # Backbone encoder built from the mmdet registry config.
        self.encoder = MODEL.build(args.latr_cfg.encoder)
        # Neck is optional: when the config has no 'neck' entry, forward()
        # must skip it (see the guard there).
        if getattr(args.latr_cfg, 'neck', None):
            self.neck = MODEL.build(args.latr_cfg.neck)
        else:
            self.neck = None
        # Fuses the multi-scale neck/backbone outputs into a single feature map.
        self.ms2one = build_ms2one(args.ms2one)

        # Query-based head doing 2D instance segmentation + 3D lane regression.
        self.head = LATRHead(
            args=args,
            dim=_dim_,
            num_group=num_group,
            num_convs=4,
            in_channels=_dim_,
            kernel_dim=_dim_,
            position_range=args.position_range,
            top_view_region=args.top_view_region,
            positional_encoding=dict(
                type='SinePositionalEncoding',
                num_feats=_dim_ // 2, normalize=True),
            num_query=num_query,
            pred_dim=self.num_y_steps,
            num_classes=args.num_category,
            embed_dims=_dim_,
            transformer=args.transformer,
            sparse_ins_decoder=args.sparse_ins_decoder,
            **args.latr_cfg.get('head', {}),
            trans_params=args.latr_cfg.get('trans_params', {})
        )

    def forward(self, image, _M_inv=None, is_training=True, extra_dict=None):
        """Run one forward pass.

        Args:
            image: input image batch, e.g. shape (B, 3, 720, 960).
            _M_inv: unused, kept for caller compatibility.
            is_training: when True, ground-truth lanes from ``extra_dict``
                are forwarded to the head for loss computation.
            extra_dict: batch dict with keys 'seg_idx_label', 'seg_label',
                'lidar2img', 'pad_shape' and (training only) 'ground_lanes',
                'ground_lanes_dense'.

        Returns:
            Whatever :meth:`LATRHead.forward` returns (a dict of outputs).
        """
        # Multi-scale backbone features, e.g. for a (1,3,720,960) input:
        #   [1,512,90,120] (1/8), [1,1024,45,60] (1/16), [1,2048,23,30] (1/32)
        out_featList = self.encoder(image)

        # BUG FIX: __init__ allows self.neck to be None, but the original
        # forward called it unconditionally. Fall back to the raw backbone
        # features when no neck is configured.
        if self.neck is not None:
            # Neck output (FPN): uniform channel dim, e.g. 4 levels of
            # [1,256,90,120] .. [1,256,12,15]
            neck_out = self.neck(out_featList)
        else:
            neck_out = out_featList

        # Collapse the multi-scale pyramid into one map, e.g. [1,256,90,120].
        neck_out = self.ms2one(neck_out)

        output = self.head(
            dict(
                x=neck_out,                                # fused feature map
                lane_idx=extra_dict['seg_idx_label'],      # per-lane seg labels, e.g. [20,720,960]
                seg=extra_dict['seg_label'],               # binary seg label, e.g. [1,720,960]
                lidar2img=extra_dict['lidar2img'],
                pad_shape=extra_dict['pad_shape'],
                ground_lanes=extra_dict['ground_lanes'] if is_training else None,
                ground_lanes_dense=extra_dict['ground_lanes_dense'] if is_training else None,
                image=image,
            ),
            is_training=is_training,
        )
        return output
    

if __name__ == "__main__":
    # Smoke test: build the model from a release config, load one batch
    # from the lane dataset, and run a single training-mode forward pass.
    config_path = "/media/yjs/Data1/learn_contents/algorithm/gitee/LATR/config/release_iccv/latr_1000_baseline.py"
    cfg = Config.fromfile(config_path)

    train_dataset = LaneDataset(cfg.dataset_dir, cfg.data_dir, cfg, data_aug=True)
    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=1,
        drop_last=True,
    )

    latr_model = LATR(cfg).cuda()
    latr_model.train()

    for batch in train_loader:
        # Move every tensor entry of the batch onto the GPU.
        for name, value in batch.items():
            if isinstance(value, torch.Tensor):
                batch[name] = value.cuda()
        output = latr_model(image=batch['image'], extra_dict=batch, is_training=True)
        print("len(output)= ", len(output))
        print(output.keys())
        break