import torch
import torch.nn as nn

from Network.EncDec.ImageDec import ImageDecoder
from Network.convnext import convnext_tiny


def einsum_replacement_simple(in_mul_k, weight):
    """Matmul-based equivalent of ``torch.einsum('ijklmn,ojkl->iomn', in_mul_k, weight)``.

    Expresses the contraction with plain ``permute``/``view``/``matmul`` ops,
    which is friendlier to export backends that do not handle einsum well.

    Args:
        in_mul_k: tensor of shape ``[i, j, k, l, m, n]``.
        weight:   tensor of shape ``[o, j, k, l]``.

    Returns:
        Tensor of shape ``[i, o, m, n]``.
    """
    batch = in_mul_k.size(0)
    sp_h = in_mul_k.size(4)
    sp_w = in_mul_k.size(5)
    out_ch = weight.size(0)

    # Move the contracted axes (j, k, l) to the back, then collapse to 2-D:
    # [i, j, k, l, m, n] -> [i, m, n, j, k, l] -> [i*m*n, j*k*l]
    lhs = in_mul_k.permute(0, 4, 5, 1, 2, 3).contiguous().view(batch * sp_h * sp_w, -1)

    # Collapse the weight the same way: [o, j, k, l] -> [j*k*l, o]
    rhs = weight.view(out_ch, -1).T

    # [i*m*n, j*k*l] @ [j*k*l, o] -> [i*m*n, o]
    prod = lhs @ rhs

    # Unflatten and put channels ahead of the spatial axes: [i, o, m, n]
    return prod.view(batch, sp_h, sp_w, out_ch).permute(0, 3, 1, 2)
    
    
class GETANet(nn.Module):
    """Two-stream encoder-decoder network for paired RGB / depth inputs.

    Two independent ConvNeXt-Tiny backbones encode the RGB image and the
    depth map into four feature pyramids each; both pyramids are fused by
    ``ImageDecoder`` to produce the final prediction(s).
    """

    def __init__(self):
        super(GETANet, self).__init__()

        # ---- encoder part ----

        # Channel widths of the four ConvNeXt-Tiny stages.
        self.channels = [96, 192, 384, 768]

        # Separate backbones per modality; attribute names keep the
        # historical "vgg" prefix used throughout this file.
        self.vgg_r = convnext_tiny(pretrained=True)
        self.vgg_d = convnext_tiny(pretrained=True)

        # Each stage = downsample layer + ConvNeXt stage, wrapped so the
        # forward pass can tap the feature map after every stage.
        self.conv1_vgg_r = nn.Sequential(self.vgg_r.downsample_layers[0], self.vgg_r.stages[0])
        self.conv1_vgg_d = nn.Sequential(self.vgg_d.downsample_layers[0], self.vgg_d.stages[0])
        self.conv2_vgg_r = nn.Sequential(self.vgg_r.downsample_layers[1], self.vgg_r.stages[1])
        self.conv2_vgg_d = nn.Sequential(self.vgg_d.downsample_layers[1], self.vgg_d.stages[1])
        self.conv3_vgg_r = nn.Sequential(self.vgg_r.downsample_layers[2], self.vgg_r.stages[2])
        self.conv3_vgg_d = nn.Sequential(self.vgg_d.downsample_layers[2], self.vgg_d.stages[2])
        self.conv4_vgg_r = nn.Sequential(self.vgg_r.downsample_layers[3], self.vgg_r.stages[3])
        self.conv4_vgg_d = nn.Sequential(self.vgg_d.downsample_layers[3], self.vgg_d.stages[3])

        # ---- decoder part ----
        self.ImageDecoder = ImageDecoder()

    def forward(self, image_Input, depth_Input):
        """Run both encoder streams and fuse their pyramids in the decoder.

        Args:
            image_Input: RGB batch, shape [B, 3, H, W].
            depth_Input: depth batch, shape [B, 3, H, W] (3-channel, per the
                export script at the bottom of this file).

        Returns:
            Whatever ``ImageDecoder`` produces for the fused pyramids.
        """
        # RGB stream through the RGB backbone.
        # FIX: the original fed depth_Input into the RGB encoder (and
        # image_Input into the depth encoder) — the streams were swapped
        # relative to every variable name in this method.
        image_feat_1 = self.conv1_vgg_r(image_Input)
        image_feat_2 = self.conv2_vgg_r(image_feat_1)
        image_feat_3 = self.conv3_vgg_r(image_feat_2)
        image_feat_4 = self.conv4_vgg_r(image_feat_3)
        image_feat = [image_feat_1, image_feat_2, image_feat_3, image_feat_4]

        # Depth stream through the depth backbone.
        depth_feat_1 = self.conv1_vgg_d(depth_Input)
        depth_feat_2 = self.conv2_vgg_d(depth_feat_1)
        depth_feat_3 = self.conv3_vgg_d(depth_feat_2)
        depth_feat_4 = self.conv4_vgg_d(depth_feat_3)
        depth_feat = [depth_feat_1, depth_feat_2, depth_feat_3, depth_feat_4]

        outputs_image = self.ImageDecoder(image_feat, depth_feat)

        return outputs_image

    def init_parameters(self, pretrain_vgg16_1024):
        """Xavier-init conv layers, then load VGG16 weights into the encoder.

        NOTE(review): legacy code — it references ``self.ImageEncoder``, which
        is never defined in ``__init__`` (this class only has ``ImageDecoder``
        and the ConvNeXt backbones), so calling this method raises
        ``AttributeError``. The VGG-keyed loading below also does not match
        the ConvNeXt encoders. Kept byte-identical pending removal/port;
        confirm with the training pipeline before deleting.
        """
        # init all layers
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

        # load rgb encoder parameters
        rgb_conv_blocks = [self.ImageEncoder.conv1,
                           self.ImageEncoder.conv2,
                           self.ImageEncoder.conv3,
                           self.ImageEncoder.conv4,
                           self.ImageEncoder.conv5,
                           self.ImageEncoder.fc6]

        listkey = [['conv1_1', 'conv1_2'], ['conv2_1', 'conv2_2'], ['conv3_1', 'conv3_2', 'conv3_3'],
                   ['conv4_1', 'conv4_2', 'conv4_3'], ['conv5_1', 'conv5_2', 'conv5_3'], ['fc6']]

        for idx, conv_block in enumerate(rgb_conv_blocks):
            num_conv = 0
            for l2 in conv_block:
                if isinstance(l2, nn.Conv2d):
                    if 'fc' in listkey[idx][num_conv]:
                        l2.weight.data = pretrain_vgg16_1024[str(listkey[idx][num_conv]) + '.weight'][:512, :512]
                        l2.bias.data = pretrain_vgg16_1024[str(listkey[idx][num_conv])
                                                           + '.bias'][:, :, :, :512].squeeze()
                    else:
                        l2.weight.data = pretrain_vgg16_1024[str(listkey[idx][num_conv]) + '.weight']
                        l2.bias.data = pretrain_vgg16_1024[str(listkey[idx][num_conv]) + '.bias'].squeeze(
                            0).squeeze(
                            0).squeeze(0).squeeze(0)
                    num_conv += 1
        return self
        

if __name__ == "__main__":
    import onnxruntime as rt

    # ---- build the model in eval mode ----
    model = GETANet().eval()

    # ---- dummy inputs for tracing ----
    bs = 1
    shape = 512
    method = "GETANet"
    rgb = torch.randn(bs, 3, shape, shape)
    tir = torch.randn(bs, 3, shape, shape)
    inputs = (rgb, tir)

    # ---- sanity-check a plain PyTorch forward pass ----
    pred = model(rgb, tir)
    print(f"[INFO] torchrun: pred={[x.shape for x in pred]}")

    # ---- export to ONNX ----
    save_onnx = f'./{method}_bs{bs}_{shape}.onnx'
    torch.onnx.export(
        model,
        inputs,
        save_onnx,
        input_names=['rgb', 'tir'],
        output_names=['pred'],
        opset_version=16,
    )
    print(f"[INFO] saved onnx: {save_onnx}")

    # ---- validate the exported graph with onnxruntime on CPU ----
    sess = rt.InferenceSession(save_onnx, providers=['CPUExecutionProvider'])
    net_inputs = {
        'rgb': rgb.cpu().numpy(),
        'tir': tir.cpu().numpy(),
    }
    net_output = sess.run(None, net_inputs)[0]
    print(f"[INFO] onnxruntime: net_output={net_output.shape}")

    # Ready-to-paste ATC command for Ascend deployment.
    print(f'[INFO] cmd2om: atc --framework=5 --model={method}_bs1_512.onnx --output={method}_bs1_512 --input_format=NCHW --input_shape="rgb:1,3,512,512;tir:1,3,512,512" --log=error --soc_version=Ascend310B1')
