"""
Created by Kostas Triaridis (@kostino)
in August 2023 @ ITI-CERTH
"""
from collections import defaultdict
from torchvision.transforms.functional import normalize
import numpy as np
import torch
import torch.nn as nn
from albumentations.pytorch import ToTensorV2
from matplotlib import pyplot as plt
from torch.nn import functional as F

import clusterings
from a2s_utils.a2s.model import A2S
from a2s_utils.cornet.model import info_block, local_conv, up_conv
from clusterings.misc import to_one_hot, get_model
from loss import SSIM, FocalLoss
from models.base import BaseModel
from models.ffs import FreqFusion
from models.heads import SegFormerHead
import logging
import logging
from typing import Tuple, Dict, List

import torch
from torch import nn, Tensor

from models.heads.segformer import ConvModule
from models.layers import trunc_normal_
from torch.nn import functional as F

from pixelShufflePack import invertedBlock
from utils_iml.iml_vit_model_2 import iml_vit_model


class DiscriminativeSubNetwork(nn.Module):
    """U-Net style discriminative (segmentation) network.

    Wraps an ``EncoderDiscriminative`` and a ``DecoderDiscriminative``.
    No activation is applied to the output logits here.  When
    ``out_features=True``, the deeper encoder features (b2..b6) are
    returned alongside the segmentation output for auxiliary use.
    """

    def __init__(self, in_channels=3, out_channels=3, base_channels=64, out_features=False):
        super(DiscriminativeSubNetwork, self).__init__()
        self.encoder_segment = EncoderDiscriminative(in_channels, base_channels)
        self.decoder_segment = DecoderDiscriminative(base_channels, out_channels=out_channels)
        self.out_features = out_features

    def forward(self, x):
        feats = self.encoder_segment(x)
        seg_logits = self.decoder_segment(*feats)
        if not self.out_features:
            return seg_logits
        # expose encoder features b2..b6 next to the segmentation output
        return (seg_logits,) + tuple(feats[1:])

class EncoderDiscriminative(nn.Module):
    """VGG-style encoder producing six per-scale feature maps.

    Six double-conv stages; stages 1-5 are each followed by a 2x max-pool,
    so for input (N, in_channels, H, W) and bw = base_width the outputs are:
        b1: (N, bw,  H,    W)       b2: (N, 2bw, H/2,  W/2)
        b3: (N, 4bw, H/4,  W/4)     b4: (N, 8bw, H/8,  W/8)
        b5: (N, 8bw, H/16, W/16)    b6: (N, 8bw, H/32, W/32)
    """

    def __init__(self, in_channels, base_width):
        super(EncoderDiscriminative, self).__init__()
        # Channel plan doubles up to 4x, then stays at 8x base_width for the
        # deepest stages.  Attribute names / Sequential layout are unchanged,
        # so existing checkpoints still load.
        self.block1 = self._double_conv(in_channels, base_width)
        self.mp1 = nn.Sequential(nn.MaxPool2d(2))
        self.block2 = self._double_conv(base_width, base_width * 2)
        self.mp2 = nn.Sequential(nn.MaxPool2d(2))
        self.block3 = self._double_conv(base_width * 2, base_width * 4)
        self.mp3 = nn.Sequential(nn.MaxPool2d(2))
        self.block4 = self._double_conv(base_width * 4, base_width * 8)
        self.mp4 = nn.Sequential(nn.MaxPool2d(2))
        self.block5 = self._double_conv(base_width * 8, base_width * 8)
        self.mp5 = nn.Sequential(nn.MaxPool2d(2))
        self.block6 = self._double_conv(base_width * 8, base_width * 8)

    @staticmethod
    def _double_conv(in_ch, out_ch):
        """One encoder stage: (conv3x3 -> BN -> ReLU) x 2."""
        return nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True))

    def forward(self, x):
        """Return the six feature maps (b1..b6), coarse last."""
        b1 = self.block1(x)
        b2 = self.block2(self.mp1(b1))
        # BUG FIX: the original called self.mp3 here, leaving self.mp2
        # registered but unused.  Both are MaxPool2d(2), so numerical output
        # is unchanged; pooling now goes through the stage's own module.
        b3 = self.block3(self.mp2(b2))
        b4 = self.block4(self.mp3(b3))
        b5 = self.block5(self.mp4(b4))
        b6 = self.block6(self.mp5(b5))
        return b1, b2, b3, b4, b5, b6

class DecoderDiscriminative(nn.Module):
    """U-Net style decoder paired with ``EncoderDiscriminative``.

    Each stage upsamples 2x, concatenates the matching encoder skip
    feature on the channel axis, and refines with a double conv.  A final
    3x3 conv maps to ``out_channels`` (no activation applied here).
    """

    def __init__(self, base_width, out_channels=1):
        super(DecoderDiscriminative, self).__init__()

        bw = base_width
        # bottom: b6 -> upsample -> concat b5
        self.up_b = self._up_stage(bw * 8, bw * 8)
        self.db_b = self._fuse_stage(bw * (8 + 8), bw * 8)
        # -> concat b4
        self.up1 = self._up_stage(bw * 8, bw * 4)
        self.db1 = self._fuse_stage(bw * (4 + 8), bw * 4)
        # -> concat b3
        self.up2 = self._up_stage(bw * 4, bw * 2)
        self.db2 = self._fuse_stage(bw * (2 + 4), bw * 2)
        # -> concat b2
        self.up3 = self._up_stage(bw * 2, bw)
        self.db3 = self._fuse_stage(bw * (2 + 1), bw)
        # -> concat b1
        self.up4 = self._up_stage(bw, bw)
        self.db4 = self._fuse_stage(bw * 2, bw)

        self.fin_out = nn.Sequential(nn.Conv2d(bw, out_channels, kernel_size=3, padding=1))

    @staticmethod
    def _up_stage(in_ch, out_ch):
        """2x bilinear upsample, then conv3x3 -> BN -> ReLU."""
        return nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True))

    @staticmethod
    def _fuse_stage(in_ch, out_ch):
        """(conv3x3 -> BN -> ReLU) x 2, applied after skip concatenation."""
        return nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True))

    def forward(self, b1, b2, b3, b4, b5, b6):
        """Decode the encoder pyramid back to full resolution."""
        stages = ((self.up_b, self.db_b, b5),
                  (self.up1, self.db1, b4),
                  (self.up2, self.db2, b3),
                  (self.up3, self.db3, b2),
                  (self.up4, self.db4, b1))
        x = b6
        for up, fuse, skip in stages:
            x = fuse(torch.cat((up(x), skip), dim=1))
        return self.fin_out(x)

class ReconstructiveSubNetwork(nn.Module):
    """Shared encoder-decoder with two task-specific attention streams.

    A VGG-style shared encoder/decoder is combined with, for each of two
    tasks, per-scale attention masks that gate the shared features.
    NOTE(review): the structure resembles MTAN ("End-to-End Multi-Task
    Learning with Attention") -- inferred from layout, confirm against the
    training code.

    ``forward`` returns:
        t1_pred  -- 3-channel head output,
        t2_pred  -- 2-channel head output,
        logsigma -- learnable log-variances (3 entries kept although the
                    third task head is commented out below).
    """
    def __init__(self):
        super(ReconstructiveSubNetwork, self).__init__()
        # initialise network parameters
        # NB: `filter` shadows the builtin of the same name (local scope only).
        filter = [64, 128, 256, 512, 512]
        # filter = [64, 128, 256, 512]
        # self.class_nb = 13

        # define encoder decoder layers
        self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])])
        self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
        for i in range(4):
        # for i in range(3):
            self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]]))
            self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]]))

        # define convolution layer
        self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
        self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
        for i in range(4):
        # for i in range(3):
            if i == 0:
                self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
                self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]]))
            else:
                # deeper scales use two stacked conv blocks
                self.conv_block_enc.append(nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]),
                                                         self.conv_layer([filter[i + 1], filter[i + 1]])))
                self.conv_block_dec.append(nn.Sequential(self.conv_layer([filter[i], filter[i]]),
                                                         self.conv_layer([filter[i], filter[i]])))

        # define task attention layers
        # encoder_att[t][s] / decoder_att[t][s]: attention gate for task t at scale s
        self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])])
        self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])])
        self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])])
        self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])

        for j in range(2):
            if j < 1:
                # only one extra task stream needs to be appended (2 tasks total)
                self.encoder_att.append(nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])]))
                self.decoder_att.append(nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])]))
            # for i in range(3):
            for i in range(4):
                self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]]))
                self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]]))

        for i in range(4):
        # for i in range(3):
            if i < 3:
            # if i < 2:
                self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]]))
                self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]]))
            else:
                # deepest scale keeps its channel width
                self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
                self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))

        # task heads: task1 -> 3 channels, task2 -> 2 channels
        self.pred_task1 = self.conv_layer([filter[0], 3], pred=True)
        self.pred_task2 = self.conv_layer([filter[0], 2], pred=True)
        # self.pred_task3 = self.conv_layer([filter[0], 2], pred=True)

        # define pooling and unpooling functions
        self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
        self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)

        # learnable per-task log-variances (uncertainty weighting)
        self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5]))

        # cs = [512, 512, 256, 128, 64]
        # self.con1x1 = nn.ModuleList([
        #     nn.Conv2d(in_channels=cs[0], out_channels=cs[1], kernel_size=1),
        #     nn.Conv2d(in_channels=cs[1], out_channels=cs[2], kernel_size=1),
        #     nn.Conv2d(in_channels=cs[2], out_channels=cs[3], kernel_size=1),
        #     nn.Conv2d(in_channels=cs[3], out_channels=cs[4], kernel_size=1)
        # ])
        # self.ffs = nn.ModuleList([
        #     FreqFusion(hr_channels=512, lr_channels=512),
        #     FreqFusion(hr_channels=256, lr_channels=256),
        #     FreqFusion(hr_channels=128, lr_channels=128),
        #     FreqFusion(hr_channels=64, lr_channels=64)
        # ])

        # Xavier init for conv/linear weights; unit weight / zero bias for BN
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                nn.init.constant_(m.bias, 0)

    def conv_layer(self, channel, pred=False):
        """Build a conv block.

        ``channel`` is [in_channels, out_channels].  With ``pred=False``
        returns conv3x3 -> BN -> ReLU; with ``pred=True`` returns a
        prediction head (conv3x3 then conv1x1, no norm/activation).
        """
        if not pred:
            conv_block = nn.Sequential(
                nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
                nn.BatchNorm2d(num_features=channel[1]),
                nn.ReLU(inplace=True),
            )
        else:
            conv_block = nn.Sequential(
                nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1),
                nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
            )
        return conv_block

    def att_layer(self, channel):
        """Build an attention gate.

        ``channel`` is [in, mid, out].  conv1x1 -> BN -> ReLU -> conv1x1 ->
        BN -> Sigmoid; the sigmoid output is used below as a multiplicative
        mask over shared features.
        """
        att_block = nn.Sequential(
            nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
            nn.BatchNorm2d(channel[1]),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0),
            nn.BatchNorm2d(channel[2]),
            nn.Sigmoid(),
        )
        return att_block

    def forward(self, x):
        """Run the shared network, then both task attention streams.

        Returns (t1_pred, t2_pred, self.logsigma); both predictions are at
        the input resolution (five pools are undone by five unpool steps).
        """
        # scratch lists, one slot per scale (5 scales)
        g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5))
        # g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 4 for _ in range(5))
        # for i in range(4):
        for i in range(5):
            g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2))

        # define attention list for tasks
        # NB: 3 slots allocated but only indices 0 and 1 are populated/used
        atten_encoder, atten_decoder = ([0] * 3 for _ in range(2))
        for i in range(2):
            atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2))
            # atten_encoder[i], atten_decoder[i] = ([0] * 4 for _ in range(2))
        for i in range(2):
            for j in range(5):
            # for j in range(4):
                atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2))

        # define global shared network
        # for i in range(4):
        for i in range(5):
            if i == 0:
                g_encoder[i][0] = self.encoder_block[i](x)
                g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
                g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
            else:
                g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1])
                g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
                g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])

        # for i in range(5):
        # # for i in range(4):
        #     if i == 0:
        #         g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1])
        #         g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
        #         g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
        #     else:
        #         g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1])
        #         g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
        #         g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])

        # shared decoder: unpool with the stored max indices, then conv blocks
        for i in range(5):
            if i == 0:
                g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1])
                g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
                g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
            else:
                g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1])
                # temp = self.con1x1[i - 1](g_upsampl[i - 1])
                # print(g_upsampl[i].dtype)
                # print(temp.dtype)
                # _, ori, up = self.ffs[i - 1](g_upsampl[i],temp)
                # g_upsampl[i] = ori + up

                g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
                g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
        # define task dependent attention module
        for i in range(2):
            # encoder-side attention: mask the shared features, refine, pool
            for j in range(5):
            # for j in range(4):
                if j == 0:
                    atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0])
                    atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
                    atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
                    atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
                else:
                    # deeper gates also see the previous scale's attended feature
                    atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat((g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1))
                    atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
                    atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
                    atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)

            # decoder-side attention: upsample, gate the shared decoder features
            for j in range(5):
            # for j in range(4):
                if j == 0:
                    atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1], scale_factor=2, mode='bilinear', align_corners=True)
                    atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
                    atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat((g_upsampl[j], atten_decoder[i][j][0]), dim=1))
                    atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
                else:
                    atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2], scale_factor=2, mode='bilinear', align_corners=True)
                    atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
                    atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat((g_upsampl[j], atten_decoder[i][j][0]), dim=1))
                    atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]

        # define task prediction layers
        # t1_pred = F.log_softmax(self.pred_task1(atten_decoder[0][-1][-1]), dim=1)
        t1_pred = self.pred_task1(atten_decoder[0][-1][-1])
        t2_pred = self.pred_task2(atten_decoder[1][-1][-1])
        # t3_pred = self.pred_task3(atten_decoder[2][-1][-1])
        # t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True)

        return t1_pred, t2_pred, self.logsigma


class decoder(nn.Module):
    """Aggregate five multi-scale features into a single-channel map.

    Every input scale is first projected to ``feat[0]`` channels.  The two
    deepest projections feed a "region" info_block and the two shallowest a
    "local" info_block, both conditioned on the deepest (global) feature.
    Region and local responses are multiplied and summed over channels, then
    resized to ``x_size``.

    NOTE(review): ``forward`` mutates the ``xs`` list in place, which is
    visible to the caller.
    """

    def __init__(self, feat):
        super(decoder, self).__init__()

        # one channel adapter per input scale: feat[i] -> feat[0]
        for idx in range(5):
            setattr(self, 'adapter{}'.format(idx),
                    nn.Sequential(*list(up_conv(feat[idx], feat[0], False))))

        self.region = info_block(None, feat[2:4], feat[0])
        self.local = info_block(None, feat[0:2], feat[0])

        self.gb_conv = nn.Sequential(*list(local_conv(feat[0], feat[0])))

    def forward(self, xs, x_size):
        # project every scale to the common channel width (in place)
        for idx in range(5):
            xs[idx] = getattr(self, 'adapter{}'.format(idx))(xs[idx])

        glob_x = xs[4]
        reg_x, _, _ = self.region(xs[2:4], glob_x)

        glob_x = self.gb_conv(glob_x)
        loc_x, _, _ = self.local(xs[0:2], glob_x)

        reg_x = nn.functional.interpolate(reg_x, size=xs[0].size()[2:], mode='bilinear')
        pred = (loc_x * reg_x).sum(dim=1, keepdim=True)
        return nn.functional.interpolate(pred, size=x_size, mode='bilinear')
# class decoder(nn.Module):
#     def __init__(self, feat):
#         super(decoder, self).__init__()
#
#         self.adapter0 = nn.Sequential(*list(up_conv(feat[0], feat[0], False)))
#         self.adapter1 = nn.Sequential(*list(up_conv(feat[1], feat[0], False)))
#         self.adapter2 = nn.Sequential(*list(up_conv(feat[2], feat[0], False)))
#         self.adapter3 = nn.Sequential(*list(up_conv(feat[3], feat[0], False)))
#         self.adapter4 = nn.Sequential(*list(up_conv(feat[4], feat[0], False)))
#
#         self.region = info_block(None, feat[2:], feat[0])
#         self.local = info_block(None, feat[0:2], feat[0])
#
#         self.gb_conv = nn.Sequential(*list(local_conv(feat[0], feat[0])))
#
#     def forward(self, xs, x_size):
#         h , w = x_size
#         xs[0] = self.adapter0(xs[0])
#         xs[1] = self.adapter1(xs[1])
#         xs[2] = self.adapter2(xs[2])
#         xs[3] = self.adapter3(xs[3])
#         # xs[4] = self.adapter4(xs[4])
#
#         glob_x = xs[3]
#         reg_x, r3, r4 = self.region(xs[2:], glob_x)
#
#         glob_x = self.gb_conv(glob_x)
#         loc_x, r1, r2 = self.local(xs[0:2], glob_x)
#
#         reg_x = nn.functional.interpolate(reg_x, size=xs[0].size()[2:], mode='bilinear')  # # 'nearest' #
#         pred = torch.sum(loc_x * reg_x, dim=1, keepdim=True)
#         pred = nn.functional.interpolate(pred, size=(h,w), mode='bilinear')
#
#         return pred

def weights_init(m):
    """DCGAN-style initialisation, intended for use with ``module.apply``.

    Modules whose class name contains 'Conv' get weights ~ N(0, 0.02);
    those containing 'BatchNorm' get weights ~ N(1, 0.02) and zero bias.
    All other module types are left untouched.
    """
    layer_name = type(m).__name__
    if 'Conv' in layer_name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
class CMNeXtWithConf(BaseModel):
    """CMNeXt backbone with SegFormer heads for forgery localization.

    Components (built from ``cfg``):
      * decode_head -- localization logits (NUM_CLASSES channels);
      * conf_head   -- 1-channel confidence map, used in the 'detection' phase;
      * detection   -- small MLP over pooled statistics (cfg.DETECTION == 'confpool');
      * optional AAM branch (cfg.AAM): an IML-ViT global feature plus the
        file-level ``decoder`` whose sigmoid output multiplicatively gates
        the localization logits.

    Training runs in one of two phases (cfg.TRAIN_PHASE):
      'localization' -- backbone + decode_head are trained;
      'detection'    -- backbone/decode_head frozen, conf_head + detection trained.
    """
    def __init__(self, cfg=None) -> None:
        # NOTE(review): attribute assigned before super().__init__(); safe only
        # because cfg is a plain config object (not a Module/Parameter/Tensor).
        self.cfg = cfg
        backbone = cfg.BACKBONE
        num_classes = cfg.NUM_CLASSES
        modals = cfg.MODALS   # ori
        logging.info('Currently training for {}'.format(cfg.TRAIN_PHASE))
        logging.info('Loading Model: {}, with backbone: {}'.format(cfg.NAME, cfg.BACKBONE))
        super().__init__(backbone, num_classes, modals)
        # B0/B1 backbone variants use a narrower embedding in the SegFormer heads
        self.decode_head = SegFormerHead(self.backbone.channels, 256 if 'B0' in backbone or 'B1' in backbone else 512,
                                         num_classes)
        self.conf_head = SegFormerHead(self.backbone.channels, 256 if 'B0' in backbone or 'B1' in backbone else 512, 1)
        # ----best---- #
        if cfg.AAM:
            # channel widths of the backbone pyramid plus the ViT global feature
            fl = [64, 128,320,512,768]

            self.decoder = decoder(fl)
            # NOTE(review): hard-coded absolute checkpoint path -- should come from cfg
            vit_path = "/home/wc/disk1/IML-ViL/pretrained-weights/mae_pretrain_vit_base.pth"
            self.vit = iml_vit_model(vit_pretrain_path=vit_path)
            # self.fus = invertedBlock(2)
        # ----best---- #

        if cfg.DETECTION == 'confpool':
            # NOTE(review): in_features=8 must match the concatenated pooled
            # statistics built in forward() -- confirm against
            # weighted_statistics_pooling's output width.
            self.detection = nn.Sequential(
                nn.Linear(in_features=8, out_features=128),
                nn.ReLU(),
                nn.Dropout(p=0.5),
                nn.Linear(in_features=128, out_features=1),
            )
        self.apply(self._init_weights)
        self.train_phase = cfg.TRAIN_PHASE
        assert self.train_phase in ['localization', 'detection']
        self.init_pretrained(cfg.PRETRAINED, backbone)
        if self.train_phase == 'detection':
            # freeze the localization path while training the detector
            self.backbone.eval()
            self.decode_head.eval()
            self.conf_head.train()
            self.detection.train()
            for p in self.decode_head.parameters():
                p.requires_grad = False
            for p in self.backbone.parameters():
                p.requires_grad = False

    def set_train(self):
        """Put the submodules relevant to the current phase into train mode."""
        if self.train_phase == 'localization':
            self.backbone.train()
            # self.backbone.eval()
            self.decode_head.train()
            # self.decode_head.eval()
        elif self.train_phase == 'detection':
            self.conf_head.train()
            self.detection.train()
            # self.rec.train()
            # self.seg.train()
        else:
            raise ValueError(f'Train phase {self.train_phase} not recognized!')

    def set_val(self):
        """Put the submodules relevant to the current phase into eval mode."""
        if self.train_phase == 'localization':
            self.backbone.eval()
            self.decode_head.eval()
            # self.a2s.eval()
            # self.rec.eval()
            # self.seg.eval()
        elif self.train_phase == 'detection':
            self.conf_head.eval()
            self.detection.eval()
            # self.rec.eval()
            # self.seg.eval()
        else:
            raise ValueError(f'Train phase {self.train_phase} not recognized!')


    def forward(self, x: list, epoch, masks = None):
        """Compute localization logits plus phase-dependent extras.

        Args:
            x: list of per-modality tensors; x[0] (the image) defines the
                output spatial size and feeds the optional ViT branch.
            epoch: unused in this implementation (kept for caller compatibility).
            masks: currently ignored -- both branches call the backbone the
                same way (the masked call is commented out).

        Returns:
            out                -- localization logits (default path);
            (out, out2)        -- when cfg.AAM is set (out2 is the AAM map);
            (out, conf, det)   -- in the 'detection' phase.
        """

        if masks is not None:
            # y = self.backbone(x, masks)
            y = self.backbone(x)
        else:
            y = self.backbone(x)
        '''
        [1,64,128,128]
        [1,128,64,64]
        [1,320,32,32]
        [1,512,16,16]
        '''
        if self.train_phase == 'detection':
            conf = self.conf_head(y)
            conf = F.interpolate(conf, size=x[0].shape[2:], mode='bilinear', align_corners=False)
        out = self.decode_head(y)
        out = F.interpolate(out, size=x[0].shape[2:], mode='bilinear', align_corners=False)
        if self.cfg.AAM:
            # global ViT feature joins the backbone pyramid for the AAM decoder
            globalF = self.vit(x[0])
            y.append(globalF['last_feat'])
            out2 = self.decoder(y, x[0].shape[2:])  #[1,1,512,512]
            out2 = torch.sigmoid(out2)

            # fus1 = torch.cat((out[:,1:2,:,:], out2[:,1:2,:,:]),dim=1)
            # fus2 = torch.cat((out[:,0:1,:,:], out2[:,0:1,:,:]),dim=1)

            # fused_out = self.fus(torch.cat((out[:,0:1,:,:],out2),1))
            # gate the localization logits with the AAM map
            out = out * out2
            # out = self.fus(out)
            return out, out2
        if self.train_phase == 'detection':
            from .layer_utils import weighted_statistics_pooling
            # pooled statistics from the confidence map and from the
            # channel-1 minus channel-0 logit difference
            f1 = weighted_statistics_pooling(conf).view(out.shape[0], -1)
            f2 = weighted_statistics_pooling(out[:, 1:2, :, :] - out[:, 0:1, :, :], F.logsigmoid(conf)).view(
                out.shape[0], -1)
            det = self.detection(torch.cat((f1, f2), -1))
            return out, conf, det

        return out

    def init_pretrained(self, pretrained: str = None, backbone: str = None) -> None:
        """Load pretrained weights (dual-path aware for multi-modal backbones)."""
        if pretrained:
            logging.info('Loading pretrained module: {}'.format(pretrained))
            if self.backbone.num_modals > 0:
                load_dualpath_model(self.backbone, pretrained, backbone)
            else:
                checkpoint = torch.load(pretrained, map_location='cpu')
                if 'state_dict' in checkpoint.keys():
                    checkpoint = checkpoint['state_dict']
                if 'model' in checkpoint.keys():
                    checkpoint = checkpoint['model']
                msg = self.backbone.load_state_dict(checkpoint, strict=False)
                print(msg)


def load_dualpath_model(model, model_file, backbone):
    """Load pretrained weights into ``model`` (non-strict).

    Args:
        model: module to load into; ``model.num_modals`` is read when the
            extra (multi-modal) path is populated.
        model_file: checkpoint path, or an in-memory state dict.
        backbone: backbone name; if it contains 'MHSA' the same checkpoint is
            additionally remapped onto the extra multi-modal layers.

    Only keys containing 'patch_embed', 'block' or 'norm' are kept.

    Bug fixed: the original chained ``elif 'block' / elif 'norm'`` only to
    the patch_embed4.norm test, so patch_embed1-3 norm keys were *also*
    written as spurious '...extra_norm...' entries (harmless under
    strict=False, but misleading).  The per-stage norm tests are now
    mutually exclusive with the block/norm fallbacks.
    """
    extra_pretrained = model_file if 'MHSA' in backbone else None
    raw_state_dict_ext = None
    if isinstance(extra_pretrained, str):
        raw_state_dict_ext = torch.load(extra_pretrained, map_location=torch.device('cpu'))
        if 'state_dict' in raw_state_dict_ext:
            raw_state_dict_ext = raw_state_dict_ext['state_dict']
    if isinstance(model_file, str):
        raw_state_dict = torch.load(model_file, map_location=torch.device('cpu'))
        if 'model' in raw_state_dict:
            raw_state_dict = raw_state_dict['model']
    else:
        raw_state_dict = model_file

    # keep only the transformer-relevant entries
    state_dict = {k: v for k, v in raw_state_dict.items()
                  if 'patch_embed' in k or 'block' in k or 'norm' in k}

    if raw_state_dict_ext is not None:
        for k, v in raw_state_dict_ext.items():
            # per-stage patch-embedding projections -> extra downsamplers
            for stage in range(1, 5):
                src = 'patch_embed{}.proj'.format(stage)
                if src in k:
                    dst = 'extra_downsample_layers.{}.proj.module'.format(stage - 1)
                    state_dict[k.replace(src, dst)] = v

            # per-stage patch-embedding norms -> one LayerNorm per modality
            for stage in range(1, 5):
                src = 'patch_embed{}.norm'.format(stage)
                if src in k:
                    for i in range(model.num_modals):
                        dst = 'extra_downsample_layers.{}.norm.ln_{}'.format(stage - 1, i)
                        state_dict[k.replace(src, dst)] = v
                    break
            else:
                # fallbacks only for keys that matched no patch_embed norm
                if 'block' in k:
                    state_dict[k.replace('block', 'extra_block')] = v
                elif 'norm' in k:
                    state_dict[k.replace('norm', 'extra_norm')] = v

    msg = model.load_state_dict(state_dict, strict=False)
    del state_dict


# if __name__ == '__main__':
#     from configs.cmnext_init_cfg import _C as cfg
#
#     logging.basicConfig(level=getattr(logging, 'INFO'))
#     device = 'cuda:1'
#     if device != 'cpu':
#         # cudnn setting
#         import torch.backends.cudnn as cudnn
#
#         cudnn.benchmark = False
#         cudnn.deterministic = True
#         cudnn.enabled = True
#     modal_extractor = ModalitiesExtractor(list(('noiseprint', 'bayar', 'srm')),
#                                           '/home/wc/disk1/MMFusion/pretrained/noiseprint/np++.pth')
#
#     ckpt = torch.load("/home/wc/disk1/MMFusion/ckpt/early_fusion_localization.pth")  # .pth 文件路径
#
#     modal_extractor.load_state_dict(ckpt['extractor_state_dict'])
#
#     modal_extractor.to(device)
#
#     modal_extractor.eval()
#
#     import albumentations as A
#
#     image_path = "/raid/datasets/ImageForgery/CAT-Net/tampCOCO/sp_images/85_000000513577.jpg_000000120127.jpg.jpg"
#     mask_path = "/raid/datasets/ImageForgery/CAT-Net/tampCOCO/sp_masks/85_000000513577.png_000000120127.png.png"
#     image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
#     mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
#     # plt.figure(figsize=(8, 8))
#     # plt.imshow(image)
#     # plt.figure(figsize=(8, 8))
#     # plt.imshow(mask)
#     h, w, c = image.shape
#     image_transforms_final = A.Compose([
#         ToTensorV2()
#     ])
#
#     if h > 2048 or w > 2048:
#         res = A.LongestMaxSize(max_size=2048)(image=image, mask=None)
#         image = res['image']
#
#     image = image_transforms_final(image=image)['image']
#     image = image / 256.0
#     image = image.to(device)
#     image = image.unsqueeze(0)
#     modals = modal_extractor(image)
#     images_norm = TF.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#     inp = [images_norm] + modals
#
#     model = CMNeXtWithConf(cfg.MODEL)
#     model.load_state_dict(ckpt['state_dict'], strict=False)
#     model = model.to(device)
#     model.eval()
#     y = model(inp)
#     # map = torch.nn.functional.softmax(pred, dim=1)[:, 1, :, :].squeeze().cpu().numpy()
#     map = torch.nn.functional.softmax(y, dim=1)[:, 1, :, :]
#
#     img = map[0].detach().cpu()
#     print(type(img))
#     # 使用Matplotlib可视化图片
#     # plt.figure(figsize=(8, 8))
#     # plt.imshow(img, cmap='gray')
#     # plt.axis('off')
#     # plt.show()
#     print(map.shape)
