from operator import ne
from torch import nn
import torch.nn.functional as F
import torch

class LSTM(nn.Module):
    """2-layer LSTM scorer over a fixed-length sequence.

    Input is (batch, seq_len, input_size); NaN/inf entries are treated as
    missing values and replaced with 0 before LayerNorm. Output is a
    single sigmoid score per sample, shape (batch, 1, 1).
    """

    def __init__(self, input_size, seq_len):  # input_size = feature_num
        """
        Args:
            input_size: number of features per timestep.
            seq_len: fixed sequence length (fc2 aggregates seq_len -> 1).
        """
        super(LSTM, self).__init__()
        # 2 stacked LSTM layers, 256 hidden units, inter-layer dropout 0.2.
        self.lstm = nn.LSTM(input_size, hidden_size=256, num_layers=2,
                            batch_first=True, dropout=0.2)
        # Normalizes over the feature dimension before the LSTM.
        self.ln = nn.LayerNorm(input_size)
        self.fc1 = nn.Linear(256, 1)      # per-timestep projection 256 -> 1
        self.fc2 = nn.Linear(seq_len, 1)  # temporal aggregation seq_len -> 1

    def forward(self, x_imput):
        """Score the batch; returns a (batch, 1, 1) tensor in (0, 1)."""
        # Treat both inf and NaN as missing and zero them out WITHOUT
        # mutating the caller's tensor (the original wrote inf->NaN into
        # x_imput in place, a side effect that also breaks on tensors
        # that require grad).
        invalid = torch.isnan(x_imput) | torch.isinf(x_imput)
        x = x_imput.masked_fill(invalid, 0.0)
        x = self.ln(x)
        lstm_out, _ = self.lstm(x)    # (batch, seq, 256)
        output = self.fc1(lstm_out)   # (batch, seq, 1)
        output = output.transpose(1, 2)  # (batch, 1, seq)
        output = F.relu(output)
        output = self.fc2(output)     # (batch, 1, 1)
        output = output.transpose(1, 2)
        # torch.sigmoid: F.sigmoid is deprecated.
        return torch.sigmoid(output)

    # Fine-tuning recipe (translated from the original notes):
    #   net = LSTM(input_size, seq_len)
    #   1. load pretrained weights: net.load_state_dict(...)
    #   2. swap the final classifier head: net.fc2 = nn.Linear(seq_len, 2)
 
 
class MLP_net(nn.Module):
    """Per-timestep MLP followed by a temporal head.

    Input is (batch, seq_len, input_size); NaN/inf entries are treated as
    missing values and replaced with 0 before LayerNorm. Output is a
    sigmoid tensor of shape (batch, back_len + horizon, 1).
    """

    def __init__(self, input_size, back_len, horizon, seq_len):  # input_size = feature_num
        """
        Args:
            input_size: number of features per timestep.
            back_len: length of the look-back window in the output.
            horizon: forecast horizon in the output.
            seq_len: fixed input sequence length (fc2 maps seq_len -> back_len + horizon).
        """
        super(MLP_net, self).__init__()

        self.fc0 = nn.Linear(input_size, 256)  # per-timestep feature expansion
        self.ln = nn.LayerNorm(input_size)     # normalize feature dimension first
        self.fc1 = nn.Linear(256, 1)           # per-timestep projection 256 -> 1
        self.fc2 = nn.Linear(seq_len, back_len + horizon)  # temporal head

    def forward(self, x_imput):
        """Return a (batch, back_len + horizon, 1) tensor in (0, 1)."""
        # Treat both inf and NaN as missing and zero them out WITHOUT
        # mutating the caller's tensor (the original wrote inf->NaN into
        # x_imput in place, a side effect that also breaks on tensors
        # that require grad).
        invalid = torch.isnan(x_imput) | torch.isinf(x_imput)
        x = x_imput.masked_fill(invalid, 0.0)
        x = self.ln(x)
        output = self.fc0(x)            # (batch, seq, 256)
        output = F.relu(output)
        output = self.fc1(output)       # (batch, seq, 1)
        output = output.transpose(1, 2)  # (batch, 1, seq)
        output = F.relu(output)
        output = self.fc2(output)       # (batch, 1, back_len + horizon)
        output = output.transpose(1, 2)  # (batch, back_len + horizon, 1)
        # torch.sigmoid: F.sigmoid is deprecated.
        return torch.sigmoid(output)
    

class FocalLossV1(nn.Module):
    """Binary focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    NOTE: despite the parameter name, ``forward`` expects *probabilities*
    in [0, 1] (e.g. sigmoid outputs) — it applies ``log`` directly rather
    than going through a logit-aware BCE.
    """

    def __init__(self,
                 alpha=0.25,
                 gamma=2,
                 reduction='mean',):
        """
        Args:
            alpha: weight applied to the positive class (1 - alpha to the negative).
            gamma: focusing exponent; larger values down-weight easy examples.
            reduction: 'mean', 'sum', or anything else for element-wise loss.
        """
        super(FocalLossV1, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        # NOTE(review): the two criteria below are never used by forward();
        # kept only so existing code touching these attributes still works.
        self.crit = nn.BCEWithLogitsLoss(reduction='none')
        self.celoss = torch.nn.CrossEntropyLoss(reduction='none')

    def forward(self, logits, label):
        '''
        args:
            logits: tensor of probabilities in [0, 1], shape (N, ...)
            label: binary (0/1) tensor of shape (N, ...)
        '''

        # compute loss
        logits = logits.float()  # use fp32 if logits is fp16
        # Clamp away from exactly 0 and 1 so log() cannot yield -inf/NaN.
        eps = 1e-7
        probs = logits.clamp(min=eps, max=1.0 - eps)
        with torch.no_grad():
            # alpha weighting: self.alpha for positives, 1 - self.alpha otherwise.
            alpha = torch.empty_like(probs).fill_(1 - self.alpha)
            alpha[label == 1] = self.alpha
        # Element-wise binary cross-entropy from probabilities.
        ce_loss = (-(label * torch.log(probs))
                   - ((1 - label) * torch.log(1 - probs)))
        # pt: model's estimated probability of the true class.
        pt = torch.where(label == 1, probs, 1 - probs)
        # Focal modulation: easy examples (pt -> 1) contribute ~0.
        loss = (alpha * torch.pow(1 - pt, self.gamma) * ce_loss)
        if self.reduction == 'mean':
            loss = loss.mean()
        if self.reduction == 'sum':
            loss = loss.sum()
        return loss

    