import torch
import torch.nn as nn
import torch.nn.functional as F
from gxl_ai_utils.utils import utils_file


class SimpleTransformerEncoder(nn.Module):
    """Batch-first wrapper around nn.TransformerEncoder with length masking.

    Accepts (B, L, D) input plus a per-sample length vector and builds the
    key-padding mask internally.
    """

    def __init__(self, dim, num_heads, num_layers, dim_feedforward):
        super(SimpleTransformerEncoder, self).__init__()
        self.transformer = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(dim, num_heads, dim_feedforward=dim_feedforward),
            num_layers
        )

    def forward(self, X, X_lens):
        """
        Args:
            X: (B, L, D) batch-first input features.
            X_lens: (B,) valid lengths; positions >= length are masked out.

        Returns:
            (B, L, D) encoded output.
        """
        # nn.TransformerEncoder defaults to (L, B, D) layout, so transpose in/out.
        X = X.transpose(0, 1)  # X: (L, B, D)
        # True marks padding positions. Build the arange on X's device and move
        # X_lens there too, so the comparison never mixes devices (the original
        # also performed a redundant second .to(X.device) on the result).
        mask = (torch.arange(X.size(0), device=X.device).unsqueeze(0)
                >= X_lens.to(X.device).unsqueeze(1))  # (B, L)
        X1 = self.transformer(X, src_key_padding_mask=mask)  # X1: (L, B, D)
        return X1.transpose(0, 1)  # (B, L, D)


class DepthwiseSeparableConv1d(nn.Module):
    """Depthwise-separable 1-D convolution.

    A per-channel (depthwise, groups=in_channels) convolution followed by a
    1x1 (pointwise) convolution that mixes channels.
    """

    def __init__(self, in_channels, out_channels, kernel_size):
        super(DepthwiseSeparableConv1d, self).__init__()
        # Same-length padding; assumes an odd kernel size — TODO confirm,
        # an even kernel would change the sequence length.
        padding = kernel_size // 2
        self.depthwise = nn.Conv1d(in_channels, in_channels, kernel_size,
                                   padding=padding, groups=in_channels)
        self.pointwise = nn.Conv1d(in_channels, out_channels, 1)

    def forward(self, x):
        """
        Args:
            x: (B, L, in_channels)

        Returns:
            (B, L, out_channels)
        """
        # Conv1d expects (B, C, L); transpose in, convolve, transpose back.
        x = x.transpose(1, 2)
        x = self.pointwise(self.depthwise(x))
        return x.transpose(1, 2)


class MultiLayerDepthwiseSeparableConv1d(nn.Module):
    """Stack of depthwise-separable 1-D convolutions applied in sequence.

    Bug fix: only the first layer maps in_channels -> out_channels; every
    subsequent layer maps out_channels -> out_channels. Previously all
    layers were built as (in_channels -> out_channels), which crashed at
    runtime whenever in_channels != out_channels (layer 2 received
    out_channels input but expected in_channels). Behavior is unchanged
    for the in_channels == out_channels case used elsewhere in this file.
    """

    def __init__(self, in_channels, out_channels, kernel_size, num_layers):
        super(MultiLayerDepthwiseSeparableConv1d, self).__init__()
        self.layers = nn.ModuleList([
            DepthwiseSeparableConv1d(
                in_channels if i == 0 else out_channels,
                out_channels,
                kernel_size,
            )
            for i in range(num_layers)
        ])

    def forward(self, x):
        """
        Args:
            x: (B, L, in_channels)

        Returns:
            (B, L, out_channels)
        """
        for layer in self.layers:
            x = layer(x)
        return x

class MLP(nn.Module):
    """Two-layer feed-forward block: dim -> int(1.5*dim) -> dim with ReLU."""

    def __init__(self, dim):
        super(MLP, self).__init__()
        hidden = int(1.5 * dim)
        self.fc1 = nn.Linear(dim, hidden)
        self.fc2 = nn.Linear(hidden, dim)

    def forward(self, x):
        """Apply fc1 -> ReLU -> fc2; the last-dimension size is preserved."""
        return self.fc2(F.relu(self.fc1(x)))


class LinearCompress(nn.Module):
    """Compress a padded (B, L, D) sequence to (B, D) via a learned
    attention-style weighted sum over the valid time steps only."""

    def __init__(self, dim, keepdim=False):
        super(LinearCompress, self).__init__()
        self.linear = nn.Linear(dim, 1)
        self.softmax = nn.Softmax(dim=1)
        # If True, keep the pooled length axis: output is (B, 1, D).
        self.keepdim = keepdim

    def forward(self, X, X_lens):
        """
        Args:
            X: (B, L, D) padded sequences.
            X_lens: (B,) valid lengths.

        Returns:
            (B, D), or (B, 1, D) when keepdim=True.
        """
        # Per-position scalar scores.
        weights = self.linear(X).squeeze(2)  # (B, L)

        # Vectorized padding mask (replaces the per-sample Python loop, which
        # forced a host-side iteration over the batch). True marks padding;
        # -inf there makes softmax assign zero weight to padded steps.
        pad_mask = (torch.arange(X.size(1), device=X.device).unsqueeze(0)
                    >= X_lens.to(X.device).unsqueeze(1))  # (B, L)
        weights = weights.masked_fill(pad_mask, float('-inf'))

        # Normalize over the length axis; each row sums to 1.
        weights = self.softmax(weights)

        # Weighted sum over time.
        return (X * weights.unsqueeze(2)).sum(dim=1, keepdim=self.keepdim)

class AvgPoolCompress(nn.Module):
    """Mean-pool a padded (B, L, D) sequence over its valid time steps.

    Bug fixes vs. the previous version:
    1. No longer mutates the caller's X in place (padding was zeroed with
       ``X[i, l:] = 0`` directly on the input tensor).
    2. With the default keepdim=False, the (B, D) sum was divided by a
       (B, 1, 1) length tensor, broadcasting to an incorrect (B, B, D)
       result; the divisor shape now matches the chosen keepdim.
    """

    def __init__(self, keepdim=False):
        super(AvgPoolCompress, self).__init__()
        # If True, keep the pooled length axis: output is (B, 1, D).
        self.keepdim = keepdim

    def forward(self, X, X_lens):
        """
        Args:
            X: (B, L, D) padded sequences (left untouched).
            X_lens: (B,) valid lengths.

        Returns:
            (B, D), or (B, 1, D) when keepdim=True.
        """
        # True marks padding positions; zero them out-of-place.
        pad_mask = (torch.arange(X.size(1), device=X.device).unsqueeze(0)
                    >= X_lens.to(X.device).unsqueeze(1))  # (B, L)
        masked = X.masked_fill(pad_mask.unsqueeze(2), 0.0)

        # Divisor shaped to broadcast cleanly against the pooled sum.
        valid_lens = X_lens.to(X.device).float().unsqueeze(1)  # (B, 1)
        if self.keepdim:
            valid_lens = valid_lens.unsqueeze(2)  # (B, 1, 1) vs (B, 1, D)

        return masked.sum(dim=1, keepdim=self.keepdim) / valid_lens

class MaxPoolCompress(nn.Module):
    """Max-pool a padded (B, L, D) sequence over its valid time steps.

    Bug fix: the previous version wrote ``-inf`` into the caller's X in
    place; masking is now done out-of-place so the input is untouched.
    """

    def __init__(self, keepdim=False):
        super(MaxPoolCompress, self).__init__()
        # If True, keep the pooled length axis: output is (B, 1, D).
        self.keepdim = keepdim

    def forward(self, X, X_lens):
        """
        Args:
            X: (B, L, D) padded sequences (left untouched).
            X_lens: (B,) valid lengths.

        Returns:
            (B, D), or (B, 1, D) when keepdim=True.
        """
        # True marks padding; -inf there so max ignores padded steps.
        pad_mask = (torch.arange(X.size(1), device=X.device).unsqueeze(0)
                    >= X_lens.to(X.device).unsqueeze(1))  # (B, L)
        masked = X.masked_fill(pad_mask.unsqueeze(2), float('-inf'))
        output, _ = masked.max(dim=1, keepdim=self.keepdim)
        return output

class ClassifierModel(nn.Module):
    """Sequence classifier: transformer encoder -> depthwise-separable conv
    stack -> learned weighted pooling -> MLP -> linear head.

    The class-embedding table shares its weight tensor with the output
    projection, so the cosine loss pulls pooled features toward the output
    row of their target class.
    """

    def __init__(self, dim, num_classes, num_heads, num_layers):
        super(ClassifierModel, self).__init__()
        self.transformer = SimpleTransformerEncoder(dim, num_heads, num_layers, dim_feedforward=int(1.5 * dim))
        self.conv = MultiLayerDepthwiseSeparableConv1d(dim, dim, 5, 10)
        self.compress = LinearCompress(dim)
        self.mlp = MLP(dim)
        self.linear_output = nn.Linear(dim, num_classes, bias=False)
        self.embedding = nn.Embedding(num_classes, dim)
        # Tie the embedding table to the classifier weight matrix.
        self.embedding.weight = self.linear_output.weight

    def forward(self, x, x_lens, label):
        """
        Args:
            x: (B, L, D) input features.
            x_lens: (B,) valid lengths.
            label: (B,) target class indices.

        Returns:
            (total_loss, ce_loss, cosine_loss, accuracy) where total_loss
            is the unweighted sum of the two losses.
        """
        encoded = self.transformer(x, x_lens)
        conved = self.conv(encoded)
        pooled = self.compress(conved, x_lens)  # weighted-sum pooling over time
        features = self.mlp(pooled)
        logits = self.linear_output(features)

        ce_loss = F.cross_entropy(logits, label)
        # Batch accuracy from the argmax prediction.
        predicted = logits.argmax(dim=1)
        accuracy = (predicted == label).float().sum() / label.size(0)

        # Pull pooled features toward their (tied) class embedding.
        class_vecs = self.embedding(label)
        cosine_loss = F.cosine_embedding_loss(features, class_vecs, torch.ones_like(label))

        return ce_loss + cosine_loss, ce_loss, cosine_loss, accuracy


class ClassifierModel_with_residual(nn.Module):
    """Variant of ClassifierModel with learned scalar residual gates.

    Each stage's output is blended with its input via a learnable scalar,
    and the two losses are combined with a learnable weighting. The
    parameter names (weight_residual1..4) are kept so state-dict copies
    from ClassifierModel still match the shared submodules.
    """

    def __init__(self, dim, num_classes, num_heads, num_layers):
        super(ClassifierModel_with_residual, self).__init__()
        self.transformer = SimpleTransformerEncoder(dim, num_heads, num_layers, dim_feedforward=int(1.5 * dim))
        self.conv = MultiLayerDepthwiseSeparableConv1d(dim, dim, 5, 10)
        self.compress = LinearCompress(dim)
        self.mlp = MLP(dim)
        self.linear_output = nn.Linear(dim, num_classes, bias=False)
        self.embedding = nn.Embedding(num_classes, dim)
        # Tie the embedding table to the classifier weight matrix.
        self.embedding.weight = self.linear_output.weight
        # Gates start near 1.0, so early training stays close to identity.
        self.weight_residual1 = nn.Parameter(torch.tensor(0.99))
        self.weight_residual2 = nn.Parameter(torch.tensor(0.99))
        self.weight_residual3 = nn.Parameter(torch.tensor(0.99))
        self.weight_residual4 = nn.Parameter(torch.tensor(1.0))

    def forward(self, x, x_lens, label):
        """
        Args:
            x: (B, L, D) input features.
            x_lens: (B,) valid lengths.
            label: (B,) target class indices.

        Returns:
            (total_loss, ce_loss, cosine_loss) where total_loss is the
            gated combination ce*(2-g4) + cosine*g4.
        """
        g1 = self.weight_residual1
        g2 = self.weight_residual2
        g3 = self.weight_residual3
        g4 = self.weight_residual4

        encoded = self.transformer(x, x_lens) * (1 - g1) + x * g1
        conved = self.conv(encoded) * (1 - g2) + encoded * g2
        pooled = self.compress(conved, x_lens)  # weighted-sum pooling over time
        features = self.mlp(pooled) * (1 - g3) + pooled * g3
        logits = self.linear_output(features)

        ce_loss = F.cross_entropy(logits, label)
        class_vecs = self.embedding(label)
        cosine_loss = F.cosine_embedding_loss(features, class_vecs, torch.ones_like(label))

        # Learned blend of the two losses; at g4 == 1 both weigh equally.
        total = ce_loss * (2 - g4) + cosine_loss * g4
        return total, ce_loss, cosine_loss


def copy_model(model1, model2):
    """Copy every state-dict entry of model1 whose key also exists in
    model2 into model2; entries unique to model2 keep their values."""
    source_state = model1.state_dict()
    target_state = model2.state_dict()
    target_state.update({k: v for k, v in source_state.items() if k in target_state})
    model2.load_state_dict(target_state)

if __name__ == '__main__':
    # Model hyper-parameters.
    dim = 128
    num_heads = 4
    num_layers = 6
    num_classes = 7

    # Build both model variants with identical hyper-parameters.
    model = ClassifierModel(
        dim=dim,
        num_classes=num_classes,
        num_heads=num_heads,
        num_layers=num_layers
    )
    model2 = ClassifierModel_with_residual(
        dim=dim,
        num_classes=num_classes,
        num_heads=num_heads,
        num_layers=num_layers
    )

    # Random batch for a forward-pass smoke test.
    batch_size = 16
    seq_len = 10
    input_dim = 128
    x = torch.randn(batch_size, seq_len, input_dim)
    x_lens = torch.randint(low=1, high=seq_len, size=(batch_size,))
    label = torch.randint(low=0, high=num_classes, size=(batch_size,))

    print(model)
    utils_file.print_model_size(model)
    print(label)

    # Seed model2 with model's shared parameters, then run both forward.
    copy_model(model, model2)
    output = model(x, x_lens, label)
    output2 = model2(x, x_lens, label)
    print(output)
    print(output2)
