import torch
import torch.nn as nn
from torchvision import transforms as transforms
# NOTE(review): removed dead statement `nn.Sequential()` — it constructed an
# empty module at import time and immediately discarded it (no side effects).
class LogisticRegression(torch.nn.Module):
    """Linear (logistic-regression) classifier head for few-shot evaluation.

    For each task, ``fit_`` trains a fresh ``nn.Linear`` on the support set,
    optionally preceded by activation / dropout / normalization layers
    selected via ``args.LR_layers``. ``forward`` then scores query samples
    with the fitted head.
    """

    def __init__(self, feature_size, args, device):
        super(LogisticRegression, self).__init__()
        self.num_class = args.test_way      # N-way classification
        self.shot = args.test_shot          # K support samples per class
        self.feature_size = feature_size
        self.epoch = args.LR_epoch          # number of fitting iterations
        self.drop_r = args.LR_drop          # dropout rate (if 'Dropout' enabled)
        self.Cls = torch.nn.Linear(self.feature_size, self.num_class)
        self.init_way = args.LR_init        # 'L2' prototype init, else normal init
        self.layers = args.LR_layers        # container of layer keywords to prepend
        self.device = device
        self.lr = args.LR_lr

    def make_layers(self):
        """Build the optional pre-classifier layers listed in ``self.layers``.

        Bug fix: the original conditions were of the form
        ``if 'ReLU' or 'relu' in self.layers`` — a non-empty string literal
        is always truthy, so ReLU, Dropout and LayerNorm were appended
        unconditionally. Each keyword is now tested for membership
        explicitly.
        """
        layers = []
        if 'ReLU' in self.layers or 'relu' in self.layers:
            layers.append(nn.ReLU())
        if 'Dropout' in self.layers or 'dropout' in self.layers:
            layers.append(nn.Dropout(self.drop_r))
        if 'BN' in self.layers:
            layers.append(nn.BatchNorm1d(self.feature_size))
        if 'LN' in self.layers or 'layer norm' in self.layers:
            layers.append(nn.LayerNorm(self.feature_size))
        return layers

    def init_weight(self, data):
        """Initialize the linear classifier's weights.

        'L2' init: set each class's weight row to that class's mean support
        feature (prototype) divided by its L2 norm. Support samples are
        assumed grouped by class, ``self.shot`` consecutive rows per class.
        Any other ``init_way`` falls back to a small normal init with zero
        bias.
        """
        if self.init_way == 'L2':
            cls_num = self.num_class
            shot = self.shot
            self.Cls.eval()
            with torch.no_grad():
                for i in range(cls_num):
                    cls_data = data[(i * shot):((i + 1) * shot), :]
                    data_mean = cls_data.mean(0)
                    # L2 norm of the class prototype.
                    data_SumSqrt = data_mean.pow(2).sum(0).sqrt()
                    weight = data_mean.div(data_SumSqrt)
                    self.Cls.weight[i] = weight
        else:
            nn.init.normal_(self.Cls.weight, mean=0, std=0.01)
            nn.init.constant_(self.Cls.bias, val=0)

    def fit_(self, support_data, support_label):
        """Fit a fresh classifier head on this task's support set."""
        # Re-create the linear head so state from the previous task is discarded.
        self.Cls = torch.nn.Linear(self.feature_size, self.num_class)
        self.init_weight(support_data)
        layers = self.make_layers()
        Cls = nn.Sequential(*layers, self.Cls)
        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(Cls.parameters(), lr=self.lr)
        Cls.to(self.device)
        Cls.train()

        # Move both tensors once, outside the loop. Bug fix: the original
        # never moved support_label, which fails when device is not CPU.
        support_data = support_data.to(self.device)
        support_label = support_label.to(self.device)
        for epoch in range(self.epoch):
            preds = Cls(support_data)
            loss = criterion(preds, support_label)
            optimizer.zero_grad()
            # retain_graph=True kept from the original in case support_data
            # carries an autograd graph shared across iterations.
            loss.backward(retain_graph=True)
            optimizer.step()
        # Keep the fitted head (including its extra layers) for query prediction.
        self.Cls = Cls

    def forward(self, x):
        preds = self.Cls(x)
        return preds


