import math

import torch
from torch import nn
import torchvision.models as models
from tqdm.notebook import tqdm


class AddWeightProduct(nn.Module):
    """Implementation of the ArcFace-style large margin arc distance.

    Produces ``s * cos(theta + m)`` logits between L2-normalized input
    features and L2-normalized per-class weight vectors, where ``theta`` is
    the angle between a feature and a class weight.

    Args:
        in_features: Dimensionality of the input feature vectors.
        out_features: Number of classes (rows of the weight matrix).
        s: Scale factor applied to the output logits.
        m: Additive angular margin in radians.
        easy_margin: If True, apply the margin only where ``cos(theta) > 0``;
            otherwise use the standard ArcFace fallback ``cos(theta) - m*sin(m)``
            past the ``cos(pi - m)`` threshold to keep the target monotonic.
    """
    def __init__(self, in_features, out_features, s=14, m=0.5, easy_margin=True):
        super(AddWeightProduct, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.s = s
        # Store margin-derived constants as plain Python floats rather than
        # CPU tensors: floats broadcast correctly on any device the module is
        # moved to, and they don't need buffer registration or state_dict care.
        self.m = float(m)
        self.weight = nn.Parameter(torch.FloatTensor(out_features, in_features))
        nn.init.xavier_uniform_(self.weight)
        self.easy_margin = easy_margin
        self.cos_m = math.cos(self.m)
        self.sin_m = math.sin(self.m)
        # Threshold / fallback used when easy_margin is False.
        self.th = math.cos(math.pi - self.m)
        self.mm = math.sin(math.pi - self.m) * self.m

    def forward(self, input):
        """Return margin-adjusted scaled cosine logits, shape (batch, out_features)."""
        # Flatten trailing dimensions so pooled feature maps are accepted too.
        input = input.view(input.size(0), -1)
        # cos(theta) between normalized features and normalized class weights.
        cosine = nn.functional.linear(nn.functional.normalize(input), nn.functional.normalize(self.weight))
        # clamp guards against tiny negative values from float round-off.
        sine = torch.sqrt((1.0 - torch.pow(cosine, 2)).clamp(0, 1))
        # cos(theta + m) = cos(theta)cos(m) - sin(theta)sin(m)
        phi = cosine * self.cos_m - sine * self.sin_m
        if self.easy_margin:
            phi = torch.where(cosine > 0, phi, cosine)
        else:
            phi = torch.where(cosine > self.th, phi, cosine - self.mm)
        output = phi * self.s
        return output
    
class ConstantGateWeight(nn.Module):
    """Gate module used for the first (base) task.

    Passes the input through unchanged and emits a per-sample gate weight of
    shape (batch, 1, 1, 1) that is constant zero.

    NOTE(review): the original comment claimed the gate is constant 1, but
    the code has always produced zeros; zeros is kept here to preserve
    behavior — confirm intent against the task-prediction logic.
    """
    def __init__(self):
        # Stateless: every shape is derived from the input at call time.
        super(ConstantGateWeight, self).__init__()

    def forward(self, x):
        """Return ``(x, gate)`` where gate is an all-zero (B, 1, 1, 1) tensor on x's device."""
        batch = x.size(0)
        gate = torch.zeros(batch, 1, 1, 1, device=x.device)
        return x, gate
class IncrementalTaskLayer(nn.Module):
    """Residual adapter block with a learned per-sample scalar gate.

    The main branch (``layer``) refines the feature map; ``gate_generator``
    predicts one scalar in [0, 1] per sample that blends the refined features
    with the original input: ``g * layer(x) + (1 - g) * x``.
    """
    def __init__(self, in_channels, out_channels):
        super(IncrementalTaskLayer, self).__init__()

        # Main transformation branch: two 1x1 convs then a 3x3 conv,
        # each followed by batch norm (LeakyReLU between them).
        self.layer = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channels),
        )

        # Gate branch: 3x3 conv + global max-pool + linear, squashed into
        # (0, 1) by Hardsigmoid so the blend is a convex combination.
        self.gate_generator = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(in_channels),
            nn.LeakyReLU(inplace=True),
            nn.AdaptiveMaxPool2d((1, 1)),
            nn.Flatten(),
            nn.Linear(in_channels, 1),
            nn.Hardsigmoid()
        )

    def forward(self, x):
        """Return ``(blended_features, gate)``; gate has shape (B, 1, 1, 1)."""
        transformed = self.layer(x)
        # (B, 1) -> (B, 1, 1, 1) so the gate broadcasts over C, H, W.
        gate = self.gate_generator(x).unsqueeze(-1).unsqueeze(-1)
        # Convex blend between the transformed features and the input.
        blended = gate * transformed + (1 - gate) * x
        return blended, gate

# 定义 ArcMultiTaskModel 类
class ArcMultiTaskModel(nn.Module):
    """Incremental multi-task model.

    Combines a shared ResNet feature extractor (stem adapted for small
    inputs), a stack of gated incremental task layers, and one ArcFace-style
    classifier head per task. ``forward`` returns
    ``(features, arc_predictions, task_predictions)`` where
    ``task_predictions`` soft-assigns each sample to a task via a
    stick-breaking product over the per-layer gate weights.
    """

    def __init__(self, base_model_name, num_classes=512, weights=None):
        super(ArcMultiTaskModel, self).__init__()
        # Assign after super().__init__() so nn.Module's attribute machinery
        # is fully initialized before any attribute is set.
        self.num_classes = num_classes
        self.model_dict = {
            'resnet18': models.resnet18,
            'resnet34': models.resnet34,
            'resnet50': models.resnet50,
            # More backbones can be added here.
        }

        if base_model_name not in self.model_dict:
            raise ValueError(f"Unsupported base model name: {base_model_name}")

        self.base_model = self.model_dict[base_model_name](weights=weights)
        # Channel count of the last residual stage (input to the adapters).
        self.in_channels_before_avgpool = self.base_model.layer4[-1].conv2.out_channels
        # Adapt the stem for small inputs: 3x3 stride-1 conv, no max-pool.
        self.base_model.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.base_model.maxpool = nn.Identity()
        self.fc_in_features = self.base_model.fc.in_features
        # Keep everything except the final avgpool and fc layers.
        self.feature_extractor = nn.Sequential(*list(self.base_model.children())[:-2])

        # The backbone has been absorbed into feature_extractor; free the rest.
        del self.base_model

        self.incremental_layers = nn.ModuleList()  # gated adapter per task
        self.arc_classifiers = nn.ModuleList()     # ArcFace head per task

    def add_incremental_task_layer(self, isInitial=False):
        """Append a gate layer: a constant (zero) gate for the first task,
        a learned IncrementalTaskLayer for every subsequent task."""
        if isInitial:
            layer = ConstantGateWeight()
        else:
            layer = IncrementalTaskLayer(self.in_channels_before_avgpool,
                                         self.in_channels_before_avgpool)

        self.incremental_layers.append(layer)
        print('Incremental Task Layer added, current len:', len(self.incremental_layers))

    def add_arc_task_classifier(self, feature_dim, num_classes_per_task, s=14.0, m=0.0):
        """Append one ArcFace classifier head for a new task.

        NOTE(review): the Linear projects to ``self.num_classes`` while the
        AddWeightProduct expects ``feature_dim`` inputs — callers must pass
        ``feature_dim == self.num_classes`` or shapes won't match; confirm
        intended coupling.
        """
        layer = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Flatten(),
            nn.Linear(self.fc_in_features, self.num_classes),
            nn.LeakyReLU(),
            AddWeightProduct(in_features=feature_dim, out_features=num_classes_per_task, s=s, m=m)
        )
        self.arc_classifiers.append(layer)
        print('Incremental ARC Classifier added, current len:', len(self.arc_classifiers))

    def forward(self, x):
        # Shared backbone features.
        features = self.feature_extractor(x)

        # Pass through every gated incremental layer, collecting gate weights.
        gate_weights = []
        for layer in self.incremental_layers:
            features, gate_weight = layer(features)
            # Flatten (B, 1, 1, 1) -> (B,). view(-1) instead of squeeze() so
            # a batch of size 1 keeps its batch dimension (squeeze() would
            # collapse it to a scalar and break the stack below).
            gate_weights.append(gate_weight.view(-1))

        # Stick-breaking task assignment over the gates, shape (B, num_layers).
        # NOTE(review): with a single incremental layer this produces two
        # prediction columns (the full product and the lone gate) — verify
        # against the training code.
        all_gate_weights = torch.stack(gate_weights, dim=1)

        task_predictions = []
        # Task 0: no later gate fired.
        task_predictions.append(torch.prod(1 - all_gate_weights, dim=1))
        # Middle tasks i: gate i fired and no later gate fired.
        for i in range(1, len(gate_weights) - 1):
            weight_product = all_gate_weights[:, i] * torch.prod(1 - all_gate_weights[:, i + 1:], dim=1)
            task_predictions.append(weight_product)
        # Last task: its own gate alone.
        task_predictions.append(gate_weights[-1])
        task_predictions = torch.stack(task_predictions, dim=1)

        # Every ArcFace head scores the shared, post-adapter features;
        # per-task logits are concatenated along the class dimension.
        arc_predictions = [classifier(features) for classifier in self.arc_classifiers]
        arc_predictions = torch.cat(arc_predictions, dim=1)
        return features, arc_predictions, task_predictions

        

# 定义创建模型的函数
def create_ArcMultiTaskModel(base_model_name, num_classes, weights=None):
    """Factory helper: build an ArcMultiTaskModel for the given backbone name."""
    model = ArcMultiTaskModel(base_model_name, num_classes, weights=weights)
    return model