import torch
import torch.nn as nn
import torchvision.models as models


import Utils
from Modules.SPPModule import SPPModule
from Models.ModelRegister import DefaultModelRegister as Register
from Modules.DCA import DCA, get_dark_channel
from Modules.CrossAttention import CrossAttention
from Modules.SeModule import SeModule
import torch.nn.functional as F
from torch.nn import TransformerEncoderLayer

from Modules.SoftPool import SoftPool, SoftPooling2D
from Utils import sobel_conv

from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce



class Resnet34_rgb_feature_net(nn.Module):
    """ResNet-34 backbone, with an SE block on the last stage, that extracts
    512-channel features from a 3-channel RGB image.
    """

    def __init__(self, num_classes=1, pretrained=True):
        super().__init__()
        # `num_classes` is unused here; this module only extracts features.
        self.resnet = models.resnet34(pretrained=pretrained)
        self.se = SeModule(512)

    def forward(self, x, need_pool=True):
        """Run the ResNet-34 stem and four residual stages, apply SE
        recalibration, and optionally global-average-pool the result.
        """
        out = self.resnet.maxpool(self.resnet.relu(self.resnet.bn1(self.resnet.conv1(x))))
        for stage in (self.resnet.layer1, self.resnet.layer2,
                      self.resnet.layer3, self.resnet.layer4):
            out = stage(out)
        out = self.se(out)
        return self.resnet.avgpool(out) if need_pool else out

class Resnet34_gradient_feature_net(nn.Module):
    """ResNet-34 backbone, with an SE block on the last stage, that extracts
    512-channel features from Sobel gradient maps of the input image.
    """

    def __init__(self, num_classes=1, pretrained=True):
        super().__init__()
        # `num_classes` is unused here; this module only extracts features.
        self.resnet = models.resnet34(pretrained=pretrained)
        # Swap the stem conv for a 4-input-channel one to accept the Sobel
        # output (the pretrained conv1 weights are intentionally discarded).
        self.resnet.conv1 = nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.se = SeModule(512)

    def forward(self, x, need_pool=True):
        """Convert the input to gradient maps, run the backbone, apply SE,
        and optionally global-average-pool the result.
        """
        # NOTE(review): sobel_conv is invoked with the module as its first
        # argument — presumably Utils.sobel_conv expects (module, tensor); confirm.
        out = sobel_conv(self, x)
        out = self.resnet.maxpool(self.resnet.relu(self.resnet.bn1(self.resnet.conv1(out))))
        for stage in (self.resnet.layer1, self.resnet.layer2,
                      self.resnet.layer3, self.resnet.layer4):
            out = stage(out)
        out = self.se(out)
        return self.resnet.avgpool(out) if need_pool else out

class Resnet34_dark_feature_net(nn.Module):
    """ResNet-34 backbone, with an SE block on the last stage, that extracts
    512-channel features from the single-channel dark-channel prior.
    """

    def __init__(self, num_classes=1, pretrained=True):
        super().__init__()
        # `num_classes` is unused here; this module only extracts features.
        self.resnet = models.resnet34(pretrained=pretrained)
        # Swap the stem conv for a 1-input-channel one to accept the dark
        # channel (the pretrained conv1 weights are intentionally discarded).
        self.resnet.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.se = SeModule(512)

    def forward(self, x, need_pool=True):
        """Compute the dark channel of the input, run the backbone, apply SE,
        and optionally global-average-pool the result.
        """
        out = get_dark_channel(x)
        out = self.resnet.maxpool(self.resnet.relu(self.resnet.bn1(self.resnet.conv1(out))))
        for stage in (self.resnet.layer1, self.resnet.layer2,
                      self.resnet.layer3, self.resnet.layer4):
            out = stage(out)
        out = self.se(out)
        return self.resnet.avgpool(out) if need_pool else out

class Pred_expert(nn.Module):
    """A single prediction expert: one linear layer mapping a flattened
    feature vector of size `num_fit` to `num_classes` output values.
    """

    def __init__(self, num_fit=512, num_classes=1):
        super().__init__()
        self.fc = nn.Linear(num_fit, num_classes)

    def forward(self, x):
        """Project features of shape (B, num_fit) to (B, num_classes)."""
        return self.fc(x)

def DiffSoftmax(logits, tau=1.0, hard=False, dim=-1, k=1):
    """Temperature-scaled softmax with an optional straight-through hard mode.

    Args:
        logits: raw scores.
        tau: softmax temperature (scores are divided by it before softmax).
        hard: if True, the forward value is a one-hot vector while gradients
            flow through the soft distribution (straight-through estimator).
        dim: dimension to normalize over.
        k: unused; kept for interface compatibility with callers.

    Returns:
        (ret, y_soft): ret is the one-hot straight-through tensor when
        `hard`, otherwise the soft distribution; y_soft is always the soft
        distribution.
    """
    y_soft = torch.softmax(logits / tau, dim=dim)
    if not hard:
        return y_soft, y_soft
    # Straight-through trick: one-hot forward, soft gradient backward.
    top = y_soft.argmax(dim=dim, keepdim=True)
    y_hard = torch.zeros_like(
        logits, memory_format=torch.legacy_contiguous_format
    ).scatter_(dim, top, 1.0)
    return y_hard - y_soft.detach() + y_soft, y_soft

class Range_router(nn.Module):
    """Routing head: scores `branch_num` experts from a (B, C, H, W) feature
    map via a residual 1x1-conv refinement, global average pooling, and a
    linear projection, normalized with DiffSoftmax.
    """

    def __init__(self, branch_num, in_channels=512, hidden_dim=512):
        super().__init__()
        self.bnum = branch_num
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, 1, 1, 0),
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, in_channels, 1, 1, 0),
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
        )
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(hidden_dim, branch_num)

    def forward(self, x, temp=1.0, hard=False):
        """Return (routing weights, soft distribution), each (B, branch_num).

        `temp` is the softmax temperature; `hard` enables the one-hot
        straight-through mode of DiffSoftmax.
        """
        refined = self.conv(x) + x  # residual refinement of the feature map
        pooled = torch.flatten(self.pool(refined), 1)
        scores = self.fc(pooled)  # (batch_size, branch_num)
        return DiffSoftmax(scores, tau=temp, hard=hard, dim=-1)

class H_Router_Pred(nn.Module):
    """Mixture-of-experts prediction head.

    A Range_router produces a per-expert weight from the feature map (soft
    while training, hard one-hot straight-through at eval); the output is the
    router-weighted sum of the per-expert linear heads applied to the
    flattened features.
    """

    def __init__(self, num_classes=1, pred_expert_nums=4, numFit=512 * 3, in_channels=512 * 3):
        super().__init__()
        self.pred_expert_nums = pred_expert_nums
        self.num_classes = num_classes
        # One linear prediction head per expert.
        self.pred_expert_list = nn.ModuleList()
        for _ in range(pred_expert_nums):
            self.pred_expert_list.append(Pred_expert(numFit, num_classes))

        self.range_router = Range_router(pred_expert_nums, in_channels, in_channels)

    def forward(self, x):
        """Return (weighted prediction (B, num_classes), soft routing (B, E))."""
        b = x.size(0)
        # Soft routing during training, hard (one-hot) routing at eval time.
        range_logits, range_y_soft = self.range_router(x, hard=not self.training)

        # FIX: the accumulator was hard-coded to shape (b, 1), which breaks
        # the in-place accumulation whenever num_classes > 1; size it by
        # num_classes instead.
        # FIX: allocate directly on the input's device rather than string-
        # matching "cuda" on a parameter's device, which was fragile.
        range_expert_x = torch.zeros((b, self.num_classes), device=x.device)

        flat = torch.flatten(x, 1)
        for expert_id, expert in enumerate(self.pred_expert_list):
            # Weight each expert's prediction by its routing score.
            range_expert_x = range_expert_x + expert(flat) * range_logits[:, expert_id].reshape(-1, 1)
        return range_expert_x, range_y_soft



@Register.register("MPFNet")
class MPFNet(nn.Module):
    """Multi-prior fusion network.

    Extracts ResNet-34 features from three views of the input — RGB, dark
    channel, and Sobel gradients — fuses them pairwise with cross-attention
    plus 1x1-conv reduction blocks, then predicts through a router-gated
    mixture of expert heads (H_Router_Pred).
    """

    def __init__(self, num_classes=1, pretrained=True):
        super().__init__()
        # When True, eval-mode forward also returns the routing distribution.
        self.eval_train_res_flag = False
        self.rgb_feature_net = Resnet34_rgb_feature_net(num_classes=num_classes, pretrained=pretrained)
        self.dark_feature_net = Resnet34_dark_feature_net(num_classes=num_classes, pretrained=pretrained)
        self.gradient_feature_net = Resnet34_gradient_feature_net(num_classes=num_classes, pretrained=pretrained)
        self.se = SeModule(512)
        self.pred = H_Router_Pred(num_classes=num_classes, pred_expert_nums=4, numFit=512, in_channels=512)

        self.fusion1 = CrossAttention(512, 512, 512)
        self.conv1 = self._make_fuse_block()
        self.fusion2 = CrossAttention(512, 512, 512)
        self.conv2 = self._make_fuse_block()
        self.pool = nn.AdaptiveAvgPool2d(1)

    @staticmethod
    def _make_fuse_block():
        # 1x1 fusion block: mix the 1024-channel concatenation, recalibrate
        # with SE, and reduce back to 512 channels.
        return nn.Sequential(
            nn.Conv2d(1024, 1024, 1, 1),
            nn.BatchNorm2d(1024),
            nn.ReLU(inplace=True),
            SeModule(1024),
            nn.Conv2d(1024, 512, 1, 1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        """Return prediction (and routing distribution in training mode)."""
        feats_rgb = self.rgb_feature_net(x, False)
        feats_dark = self.dark_feature_net(x, False)
        feats_grad = self.gradient_feature_net(x, False)

        # Stage 1: cross-attend RGB <-> dark-channel features, then fuse.
        a, b = self.fusion1([feats_rgb, feats_dark])
        fused = self.conv1(torch.concat([a, b], dim=1))

        # Stage 2: cross-attend gradient features with the stage-1 fusion.
        a, b = self.fusion2([feats_grad, fused])
        fused = self.conv2(torch.concat([a, b], dim=1))

        pooled = self.pool(self.se(fused))
        pred, range_y_soft = self.pred(pooled)
        # Training (or the eval-debug flag) also exposes the routing weights.
        if self.training or self.eval_train_res_flag:
            return pred, range_y_soft
        return pred

if __name__ == '__main__':
    # Smoke test: push a random batch through the network. A freshly built
    # module is in training mode, so forward returns (prediction, routing).
    net = MPFNet()
    dummy = torch.randn((2, 3, 224, 224))
    out = net(dummy)

    print(type(out) == tuple)
    print(out)