import numpy as np 
from torch import nn
import torch.nn.functional as F
import torch


class LSTM(nn.Module):
    """2-layer LSTM forecaster.

    Input:  (batch, seq_len, input_size) — may contain NaN/inf, treated as missing.
    Output: (batch, back_len + horizon, 1).
    """

    def __init__(self, input_size, back_len, horizon, seq_len):  # input_size = feature_num
        super(LSTM, self).__init__()
        self.lstm = nn.LSTM(input_size, hidden_size=256, num_layers=2, batch_first=True, dropout=0.2)
        self.ln = nn.LayerNorm(input_size)
        # fc1 collapses the feature dim; fc2 maps the time dim seq_len -> back_len+horizon.
        self.fc1 = nn.Linear(256, 1)
        self.fc2 = nn.Linear(seq_len, back_len + horizon)
        self.init_weight()

    def init_weight(self):
        # NOTE(review): this re-initializes EVERY tensor in state_dict with
        # N(0, 0.01) — including LayerNorm weights (normally ones) and LSTM
        # biases. Preserved as-is; confirm this is intentional.
        for name, value in self.state_dict().items():
            nn.init.normal_(value, mean=0, std=0.01)
        print('[finishing]:assign weight by inite_weight')

    def forward(self, x_imput):
        # Treat non-finite entries (NaN or +/-inf) as missing and zero-fill.
        # Unlike the original, this does NOT write into the caller's tensor
        # in place — the input is left untouched.
        missing = torch.isnan(x_imput) | torch.isinf(x_imput)
        x = x_imput.masked_fill(missing, 0)
        x = self.ln(x)
        lstm_out, _ = self.lstm(x)           # (B, seq_len, 256)
        out = self.fc1(lstm_out)             # (B, seq_len, 1)
        out = out.transpose(1, 2)            # (B, 1, seq_len)
        out = self.fc2(out)                  # (B, 1, back_len+horizon)
        return out.transpose(1, 2)           # (B, back_len+horizon, 1)
class MLP_net(nn.Module):
    """Two-stage MLP forecaster.

    Input:  (batch, seq_len, input_size) — may contain NaN/inf, treated as missing.
    Output: (batch, back_len + horizon, 1).
    """

    def __init__(self, input_size, back_len, horizon, seq_len):  # input_size = feature_num
        super(MLP_net, self).__init__()
        self.fc0 = nn.Linear(input_size, 256)
        self.ln = nn.LayerNorm(input_size)
        # fc1 collapses the feature dim; fc2 maps the time dim seq_len -> back_len+horizon.
        self.fc1 = nn.Linear(256, 1)
        self.fc2 = nn.Linear(seq_len, back_len + horizon)

    def forward(self, x_imput):
        # Treat non-finite entries (NaN or +/-inf) as missing and zero-fill.
        # Unlike the original, this does NOT write into the caller's tensor
        # in place — the input is left untouched.
        missing = torch.isnan(x_imput) | torch.isinf(x_imput)
        x = x_imput.masked_fill(missing, 0)
        x = self.ln(x)
        out = F.relu(self.fc0(x))            # (B, seq_len, 256)
        out = self.fc1(out)                  # (B, seq_len, 1)
        out = out.transpose(1, 2)            # (B, 1, seq_len)
        out = F.relu(out)
        out = self.fc2(out)                  # (B, 1, back_len+horizon)
        return out.transpose(1, 2)           # (B, back_len+horizon, 1)

class MLP_rs_net(nn.Module):
    """Two-headed MLP forecaster: one head trained for MSE, one for correlation.

    Input:  (batch, seq_len, input_size) — may contain NaN/inf, treated as missing.
    Output: tuple (mse_output, cor_output), each (batch, back_len + horizon, 1).
    """

    def __init__(self, input_size, back_len, horizon, seq_len):  # input_size = feature_num
        super(MLP_rs_net, self).__init__()
        self.fc0 = nn.Linear(input_size, 256)
        self.ln = nn.LayerNorm(input_size)
        self.fc1 = nn.Linear(256, 1)
        # Two parallel time-dim heads sharing the same trunk.
        self.fcmse = nn.Linear(seq_len, back_len + horizon)
        self.fccor = nn.Linear(seq_len, back_len + horizon)

    def forward(self, x_imput):
        # Treat non-finite entries (NaN or +/-inf) as missing and zero-fill.
        # Fixes two issues in the original: it mutated the caller's tensor in
        # place, and it used np.nan where the sibling classes use torch.nan.
        missing = torch.isnan(x_imput) | torch.isinf(x_imput)
        x = x_imput.masked_fill(missing, 0)
        x = self.ln(x)
        out = F.relu(self.fc0(x))            # (B, seq_len, 256)
        out = self.fc1(out)                  # (B, seq_len, 1)
        out = out.transpose(1, 2)            # (B, 1, seq_len)
        mse_output = self.fcmse(out).transpose(1, 2)  # (B, back_len+horizon, 1)
        cor_output = self.fccor(out).transpose(1, 2)  # (B, back_len+horizon, 1)
        return mse_output, cor_output
    
class Cor_loss(nn.Module):
    """Negative Pearson correlation between flattened pred and true.

    Returns -r, so minimizing this loss maximizes correlation; perfectly
    correlated inputs give exactly -1.
    """

    def __init__(self) -> None:
        super(Cor_loss, self).__init__()

    def forward(self, pred, true):
        # reshape (not view) so non-contiguous tensors (e.g. transposed
        # network outputs) are accepted.
        pred = pred.reshape(-1)
        true = true.reshape(-1)
        cov = torch.mean((pred - torch.mean(pred)) * (true - torch.mean(true)))
        # Use the biased (population) std to match the biased covariance above.
        # The original mixed unbiased std with biased covariance, so identical
        # inputs scored -(n-1)/n instead of -1.
        denom = torch.std(pred, unbiased=False) * torch.std(true, unbiased=False)
        # NOTE(review): constant inputs give denom == 0 and a NaN result,
        # matching the original's degenerate behavior — add an eps if needed.
        return -torch.div(cov, denom)
    
    
class Ma_loss(nn.Module):
    """Mean absolute gap between per-sample means taken along dim 1."""

    def __init__(self) -> None:
        super(Ma_loss, self).__init__()

    def forward(self, pred, true):
        # Average each tensor along dim 1, then average the absolute
        # differences of those means over the remaining dims.
        mean_gap = (pred.mean(dim=1) - true.mean(dim=1)).abs()
        return mean_gap.mean()
