import logging
import numpy as np
import torch
import torch.nn as nn
import warnings
from datetime import datetime
import sys
sys.path.append('../common/')
from utils import *
from torch.optim import *
from tqdm import tqdm
from os.path import dirname

# Suppress all warning output (e.g. torch / numpy deprecation noise).
warnings.filterwarnings("ignore")
# Configure a module-level logger that writes one file per day,
# tab-separated: timestamp, level, message.
logger = logging.getLogger(__name__)
# NOTE(review): assumes a ./log directory already exists next to this
# file — logging.FileHandler raises FileNotFoundError otherwise; confirm.
log_path = dirname(__file__) + "/log/" + datetime.now().strftime("%Y_%m_%d") +  "_usadlstm.log"
file_handler = logging.FileHandler(log_path)
formatter = logging.Formatter(
    "%(asctime)s\t%(levelname)s\t%(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.setLevel(logging.INFO)


class Encoder(nn.Module):
    """LSTM encoder mapping an input window to a latent sequence.

    Input of shape (batch, seq, input_size) is projected timestep-wise
    into (batch, seq, latent_size); dropout is applied to the output.
    """

    def __init__(self, input_size, latent_size, lstm_layers = 1, dropout_rate=0.2):
        super().__init__()
        # A single batch-first LSTM does the whole projection.
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=latent_size,
            num_layers=lstm_layers,
            batch_first=True,
        )
        self.dropout1 = nn.Dropout(dropout_rate)

    def forward(self, w):
        """Encode window ``w`` into a latent sequence (dropout applied)."""
        hidden_seq, _ = self.lstm(w)
        return self.dropout1(hidden_seq)


class Decoder(nn.Module):
    """LSTM decoder reconstructing a window from a latent sequence.

    The LSTM compresses each timestep to ``output_size // 2`` hidden
    units and a final linear layer expands back to ``output_size``
    features, so input (batch, seq, latent_size) yields
    (batch, seq, output_size).
    """

    def __init__(self, latent_size, output_size, lstm_layers = 1, dropout_rate=0.2):
        super().__init__()
        # BUGFIX: the original int(output_size / 2) produces a hidden size
        # of 0 when output_size == 1, which nn.LSTM rejects. Clamp to >= 1;
        # identical to the original for every output_size >= 2.
        hidden_size = max(1, output_size // 2)
        self.lstm = nn.LSTM(
            input_size=latent_size,
            hidden_size=hidden_size,
            num_layers=lstm_layers,
            batch_first=True)
        self.dropout1 = nn.Dropout(dropout_rate)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, z):
        """Decode latent sequence ``z`` into a reconstructed window."""
        out1, (_, _) = self.lstm(z)
        out1 = self.dropout1(out1)
        w = self.linear(out1)
        return w


class UsadLSTM(nn.Module):
    """USAD (UnSupervised Anomaly Detection) with LSTM autoencoders.

    A shared encoder feeds two decoders: AE1 = encoder + decoder1 and
    AE2 = encoder + decoder2. The two autoencoders are trained
    adversarially (see training_step); anomaly scores combine both
    reconstruction errors (see predict_prob / testing).
    """

    def __init__(self, w_size, z_size, lstm_layers = 1, l2 = 1e-4, dropout_rate=0.2):
        super().__init__()
        self.w_size = w_size  # number of input features per timestep
        self.z_size = z_size  # latent dimension produced by the encoder
        self.encoder = Encoder(w_size, z_size, lstm_layers = lstm_layers, dropout_rate = dropout_rate)
        self.decoder1 = Decoder(z_size, w_size, lstm_layers = lstm_layers, dropout_rate = dropout_rate)
        self.decoder2 = Decoder(z_size, w_size, lstm_layers = lstm_layers, dropout_rate = dropout_rate)
        # Default device comes from utils.get_default_device (GPU if available).
        self.device = get_default_device()
        self.l2 = l2  # weight decay forwarded to both optimizers

    def training_step(self, batch, n):
        """One adversarial step; ``n`` is the 1-based epoch number.

        With w3 = D2(E(D1(E(x)))), returns (loss_ae1, loss_ae2) where
          loss_ae1 = (1/n)*MSE(x, w1) + (1 - 1/n)*MSE(x, w3)
          loss_ae2 = (1/n)*MSE(x, w2) - (1 - 1/n)*MSE(x, w3)
        so as epochs progress AE1 tries to fool AE2 while AE2 learns to
        amplify AE1's reconstruction error.
        """
        z = self.encoder(batch)
        w1 = self.decoder1(z)
        w2 = self.decoder2(z)
        w3 = self.decoder2(self.encoder(w1))
        loss_ae1 = 1 / n * torch.mean((batch - w1) ** 2) + (1 - 1 / n) * torch.mean(
            (batch - w3) ** 2
        )
        loss_ae2 = 1 / n * torch.mean((batch - w2) ** 2) - (1 - 1 / n) * torch.mean(
            (batch - w3) ** 2
        )
        return loss_ae1, loss_ae2

    def validation_step(self, batch, n):
        """Same losses as training_step, computed without gradients."""
        with torch.no_grad():
            z = self.encoder(batch)
            w1 = self.decoder1(z)
            w2 = self.decoder2(z)
            w3 = self.decoder2(self.encoder(w1))
            loss_ae1 = 1 / n * torch.mean((batch - w1) ** 2) + (1 - 1 / n) * torch.mean(
                (batch - w3) ** 2
            )
            loss_ae2 = 1 / n * torch.mean((batch - w2) ** 2) - (1 - 1 / n) * torch.mean(
                (batch - w3) ** 2
            )
        return {"val_loss1": loss_ae1, "val_loss2": loss_ae2}

    def validation_epoch_end(self, outputs):
        """Average the per-batch validation losses into Python floats."""
        batch_losses1 = [x["val_loss1"] for x in outputs]
        epoch_loss1 = torch.stack(batch_losses1).mean()
        batch_losses2 = [x["val_loss2"] for x in outputs]
        epoch_loss2 = torch.stack(batch_losses2).mean()
        return {"val_loss1": epoch_loss1.item(), "val_loss2": epoch_loss2.item()}

    def epoch_end(self, epoch, result):
        """Log the averaged validation losses for one epoch."""
        info = "Epoch [{}], \tval_loss1: {:.4f},\tval_loss2: {:.4f}".format(
            epoch, result["val_loss1"], result["val_loss2"]
        )
        logger.info(info)

    def fit(self, train_loader, validation_loader, epochs, optimizer=Adam):
        """Move the model to its device and run the USAD training loop.

        Returns the per-epoch validation history produced by training().
        """
        self.to(self.device)
        return training(
            epochs,
            self,
            train_loader,
            validation_loader,
            optimizer=optimizer,
            device=self.device
        )

    def predict_prob(self, test_loader, alpha = 0.1, beta = 0.9):
        """Return anomaly scores for the whole test set as a 1-D ndarray.

        ``alpha`` and ``beta`` weight the two reconstruction errors
        (see testing()).
        """
        results = testing(self, test_loader, alpha, beta)
        if len(results) >= 2:
            # All full-size batches stack into one tensor; the (possibly
            # smaller) final batch is concatenated separately.
            y_pred = np.concatenate(
                [
                    torch.stack(results[:-1]).flatten().detach().cpu().numpy(),
                    results[-1].flatten().detach().cpu().numpy(),
                ]
            )
        else:
            # BUGFIX: a stray trailing comma previously made this branch
            # return a 1-tuple instead of the array, breaking callers that
            # expect an ndarray (as the other branch returns).
            y_pred = results[-1].flatten().detach().cpu().numpy()

        return y_pred


def evaluate(model, validation_loader, n, device="cpu"):
    """Run the model's validation step over every batch and aggregate.

    ``n`` is forwarded to validation_step (the 1-based epoch number).
    Returns the dict produced by model.validation_epoch_end.
    """
    step_results = []
    for batch in validation_loader:
        batch = to_device(batch, device)
        step_results.append(model.validation_step(batch, n))
    return model.validation_epoch_end(step_results)

def training(epochs, model, train_loader, validation_loader, optimizer = Adam, device="cpu"):
    """USAD adversarial training loop.

    AE1 (encoder + decoder1) and AE2 (encoder + decoder2) each get their
    own optimizer; the adversarial loss terms are weighted by 1/n with n
    the 1-based epoch (see UsadLSTM.training_step).

    Returns a list of per-epoch validation result dicts (empty when
    validation_loader is None).
    """
    history = []
    optimizer1 = optimizer(
        list(model.encoder.parameters()) + list(model.decoder1.parameters()), weight_decay = model.l2
    )
    optimizer2 = optimizer(
        list(model.encoder.parameters()) + list(model.decoder2.parameters()), weight_decay = model.l2
    )
    for epoch in tqdm(range(epochs)):
        # BUGFIX: testing() switches the model to eval mode and never
        # restores it, so a later fit() would silently run with dropout
        # disabled; re-enable train mode explicitly each epoch.
        model.train()
        for batch in train_loader:
            batch = to_device(batch, device)

            # Train AE1. backward() frees the autograd graph, so AE2 needs
            # a fresh forward pass below — the double call is deliberate.
            loss1, _ = model.training_step(batch, epoch + 1)
            loss1.backward()
            optimizer1.step()
            optimizer1.zero_grad()

            # Train AE2 on a recomputed forward pass.
            _, loss2 = model.training_step(batch, epoch + 1)
            loss2.backward()
            optimizer2.step()
            optimizer2.zero_grad()

        if validation_loader is not None:
            # BUGFIX: disable dropout so validation losses are stable
            # (validation_step already runs under no_grad).
            model.eval()
            result = evaluate(model, validation_loader, epoch + 1, device)
            model.epoch_end(epoch, result)
            history.append(result)
    return history

def testing(model, test_loader, alpha = 0.1, beta = 0.9):
    """Compute per-window USAD anomaly scores over the test set.

    With w1 = D1(E(x)) and w2 = D2(E(w1)), each batch scores
        alpha * mean((x - w1)^2, dim=1) + beta * mean((x - w2)^2, dim=1).

    Returns a list of score tensors, one entry per batch.
    """
    # Hoisted out of the loop: the device does not change between batches.
    device = get_default_device()
    with torch.no_grad():
        model.eval()
        results = []
        for batch in test_loader:
            batch = to_device(batch, device)
            w1 = model.decoder1(model.encoder(batch))
            w2 = model.decoder2(model.encoder(w1))
            results.append(
                alpha * torch.mean((batch - w1) ** 2, dim=1)
                + beta * torch.mean((batch - w2) ** 2, dim=1)
            )
    return results