import math
import torch

from model.ISRG_block import *
from model.Resnet_block import *
import torch.nn.functional as F
from params import params_dict


def resnet34(num_classes=1000, include_top=True):
    """Build a 34-layer ResNet (BasicBlock, stage depths [3, 4, 6, 3]).

    Pretrained weights: https://download.pytorch.org/models/resnet34-333f7ec4.pth
    """
    stage_depths = [3, 4, 6, 3]
    return ResNet(BasicBlock, stage_depths,
                  num_classes=num_classes, include_top=include_top)


def resnet50(num_classes=1000, include_top=True):
    """Build a 50-layer ResNet (Bottleneck, stage depths [3, 4, 6, 3]).

    Pretrained weights: https://download.pytorch.org/models/resnet50-19c8e357.pth
    """
    stage_depths = [3, 4, 6, 3]
    return ResNet(Bottleneck, stage_depths,
                  num_classes=num_classes, include_top=include_top)


def resnet101(num_classes=1000, include_top=True):
    """Build a 101-layer ResNet (Bottleneck, stage depths [3, 4, 23, 3]).

    Pretrained weights: https://download.pytorch.org/models/resnet101-5d3b4d8f.pth
    """
    stage_depths = [3, 4, 23, 3]
    return ResNet(Bottleneck, stage_depths,
                  num_classes=num_classes, include_top=include_top)


def resnext50_32x4d(num_classes=1000, include_top=True):
    """Build a ResNeXt-50 (32 groups, 4 channels per group).

    Pretrained weights: https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth
    """
    return ResNet(
        Bottleneck,
        [3, 4, 6, 3],
        num_classes=num_classes,
        include_top=include_top,
        groups=32,
        width_per_group=4,
    )


def resnext101_32x8d(num_classes=1000, include_top=True):
    """Build a ResNeXt-101 (32 groups, 8 channels per group).

    Pretrained weights: https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth
    """
    return ResNet(
        Bottleneck,
        [3, 4, 23, 3],
        num_classes=num_classes,
        include_top=include_top,
        groups=32,
        width_per_group=8,
    )



class CrossAttention(nn.Module):
    """Multi-head cross-attention: queries come from `x_q`, keys/values from `x_kv`.

    Keys and values are projected from `dim_kv` into the query embedding
    space (`dim_q`), so the output has the same width as the query stream.

    Args:
        dim_q: embedding dimension of the query sequence (also the output dim).
        dim_kv: embedding dimension of the key/value sequence.
        num_heads: number of attention heads; must divide `dim_q`.
    """

    def __init__(self, dim_q, dim_kv, num_heads=8):
        super(CrossAttention, self).__init__()
        if dim_q % num_heads != 0:
            raise ValueError(
                f"dim_q ({dim_q}) must be divisible by num_heads ({num_heads})")
        self.num_heads = num_heads
        self.dim_q = dim_q

        self.query = nn.Linear(dim_q, dim_q)
        self.key = nn.Linear(dim_kv, dim_q)
        self.value = nn.Linear(dim_kv, dim_q)
        # BUGFIX: scale by the per-head dimension, not the full embedding
        # dimension. The dot product q @ k is taken over head_dim-sized
        # vectors, so standard scaled dot-product attention (and
        # nn.MultiheadAttention) uses head_dim ** -0.5; dim_q ** -0.5
        # over-shrinks the logits by a factor of sqrt(num_heads).
        self.scale = (dim_q // num_heads) ** -0.5

        self.proj = nn.Linear(dim_q, dim_q)

    def forward(self, x_q, x_kv):
        """Attend from x_q (B, N, dim_q) over x_kv (B, S, dim_kv).

        Returns:
            Tensor of shape (B, N, dim_q).
        """
        B, N, C = x_q.shape
        _, S, _ = x_kv.shape
        head_dim = C // self.num_heads

        # Split projections into heads: q/v -> (B, heads, len, head_dim),
        # k -> (B, heads, head_dim, S) so that q @ k gives attention logits.
        q = self.query(x_q).view(B, N, self.num_heads, head_dim).permute(0, 2, 1, 3)
        k = self.key(x_kv).view(B, S, self.num_heads, head_dim).permute(0, 2, 3, 1)
        v = self.value(x_kv).view(B, S, self.num_heads, head_dim).permute(0, 2, 1, 3)

        attn = (q @ k) * self.scale   # (B, heads, N, S)
        attn = attn.softmax(dim=-1)

        # Merge heads back to (B, N, C) and apply the output projection.
        x = (attn @ v).permute(0, 2, 1, 3).contiguous().view(B, N, C)
        x = self.proj(x)
        return x


class MultimodalFusion(nn.Module):
    """Fuse an image token sequence with a text token sequence.

    Both streams are projected into `fusion_dim`; image tokens then attend
    over text tokens (text-conditioned image features), and the result is
    concatenated with the projected image tokens and re-projected.

    Args:
        img_dim: per-token dimension of the image feature sequence.
        txt_dim: per-token dimension of the text feature sequence.
        fusion_dim: common fusion space (and output) dimension.
        num_heads: attention heads for the cross-attention modules.
    """

    def __init__(self, img_dim, txt_dim, fusion_dim, num_heads=8):
        super(MultimodalFusion, self).__init__()
        self.img_dim = img_dim
        self.txt_dim = txt_dim
        self.fusion_dim = fusion_dim

        # NOTE: cross_attn_img_to_txt is retained for state-dict /
        # checkpoint compatibility, but its output was never consumed by
        # forward() (see BUGFIX note there).
        self.cross_attn_img_to_txt = CrossAttention(fusion_dim, fusion_dim, num_heads)
        self.cross_attn_txt_to_img = CrossAttention(fusion_dim, fusion_dim, num_heads)

        self.img_proj = nn.Linear(img_dim, fusion_dim)
        self.txt_proj = nn.Linear(txt_dim, fusion_dim)

        self.fusion_proj = nn.Linear(fusion_dim * 2, fusion_dim)

    def forward(self, img_features, txt_features):
        """Fuse img_features (B, N, img_dim) with txt_features (B, S, txt_dim).

        Returns:
            Tensor of shape (B, N, fusion_dim).
        """
        img_proj = self.img_proj(img_features)  # (B, N, fusion_dim)
        txt_proj = self.txt_proj(txt_features)  # (B, S, fusion_dim)

        # Text-conditioned image features: the queries are the N image
        # tokens, so the output already has length N.
        # BUGFIX: the original additionally computed
        # cross_attn_img_to_txt(txt_proj, img_proj) and then ran
        # F.interpolate on txt_to_img to length N. The first result was
        # discarded (dead compute) and the interpolation was a no-op
        # (txt_to_img already has length N), so both were removed;
        # outputs are unchanged.
        txt_to_img = self.cross_attn_txt_to_img(img_proj, txt_proj)  # (B, N, fusion_dim)

        # Concatenate and project back down to fusion_dim.
        fusion = torch.cat([img_proj, txt_to_img], dim=-1)  # (B, N, fusion_dim * 2)
        fusion = self.fusion_proj(fusion)
        return fusion


class Fully_Connected(nn.Module):
    """A single linear classification head.

    Args:
        input_dim: size of the last dimension of the input.
        output_dim: number of output logits (default 4).
    """

    def __init__(self, input_dim, output_dim=4):
        super(Fully_Connected, self).__init__()
        self.layers = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        """Project x of shape (..., input_dim) to (..., output_dim)."""
        return self.layers(x)


class PResnet(nn.Module):
    """Joint image + predicate classifier.

    Encodes the image with ResNet-101 and the predicate token sequence with
    ISRG, fuses the two streams with MultimodalFusion, pools, and applies a
    final linear classifier.

    NOTE(review): the dimension wiring (img_dim=49, fusion_dim=512, final
    Linear in_features=2048) depends on the exact output shapes of the
    project's ResNet and ISRG modules, which are not visible in this file.
    The shape comments below are assumptions to confirm against those
    modules.
    """

    def __init__(self, vocab_size, embedding_dim, n_head, num_layers, v_vector_size, input_dim, output_dim=4, is_train=True):
        super(PResnet, self).__init__()
        # Text-encoder (ISRG) hyperparameters.
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.n_head = n_head
        self.num_layers = num_layers
        self.v_vector_size = v_vector_size
        self.is_train = is_train
        # input_dim is stored but not otherwise used in this class.
        self.input_dim = input_dim
        self.output_dim = output_dim
        # Predicate-to-vector text encoder.
        self.P2V = ISRG(self.vocab_size, self.embedding_dim, self.n_head, self.num_layers, self.v_vector_size, self.is_train)
        # Image backbone; presumably modified upstream to return a feature
        # map rather than class logits -- confirm in model.Resnet_block.
        self.Resnet = resnet101()
        # img_dim=49 suggests 7x7 spatial tokens flattened along the last
        # axis; txt_dim comes from the global params_dict, not the
        # embedding_dim argument -- TODO confirm these always agree.
        self.multimodalFusion = MultimodalFusion(49, params_dict['embedding_dim'], 512)
        self.Fully_Connected = nn.Linear(2048, output_dim, bias=True)
        # Pools the last dimension of the fused features down to 1.
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        # self.Fully_Connected = nn.Linear(511, params_dict['output_dim'], bias=True)

    def forward(self, img, predicate):
        """Classify an (image, predicate-token) pair.

        Returns logits of shape (B, output_dim).
        """
        v1 = self.Resnet(img)        # image feature sequence (shape set by backbone)
        v2 = self.P2V(predicate)     # text feature sequence from ISRG
        # Fuse, then average-pool the trailing dimension to length 1.
        v = self.avgpool(self.multimodalFusion(v1, v2))
        # print(v.shape)
        v = torch.flatten(v, 1)      # (B, tokens) after pooling -- must equal 2048
        # print(v.shape)
        x = self.Fully_Connected(v)
        return x


# if __name__ == '__main__':
#     v1 = torch.randn(24, 3, 224, 224)
#     v2 = torch.randint(1, 256, (24, 10))
#     model = PResnet(256, 64, 4, 2, 64, 1024, 4)
#     x = model(v1, v2)
#     print(x.shape)
