import os

import joblib
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim

class Autoencoder(nn.Module):
    """Fully-connected autoencoder: input -> 128 -> encoding_dim -> 128 -> input.

    The model is pinned to CPU (the CUDA selection line was deliberately
    disabled). Global torch/numpy seeds are set in the constructor so
    training runs are reproducible.
    """

    def __init__(self, input_dim, encoding_dim=64):
        super(Autoencoder, self).__init__()
        # Keep constructor args so save_model can persist them alongside weights.
        self.input_dim = input_dim
        self.encoding_dim = encoding_dim
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, 128),
            nn.ReLU(),
            nn.Linear(128, encoding_dim),
            nn.ReLU()
        )
        self.decoder = nn.Sequential(
            nn.Linear(encoding_dim, 128),
            nn.ReLU(),
            nn.Linear(128, input_dim)
        )
        # NOTE: CPU is forced on purpose; restore the commented expression to
        # enable CUDA when available.
        # self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.device = torch.device('cpu')
        self.to(self.device)  # move the model to the selected device
        # Seed both torch and numpy for reproducibility.
        torch.manual_seed(42)
        np.random.seed(42)

    def forward(self, x):
        """Encode then decode *x*; returns the reconstruction."""
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

    def train_autoencoder(self, train_loader, val_loader, num_epochs=100, learning_rate=0.001, save_path = None):
        """Train with MSE reconstruction loss and Adam.

        Args:
            train_loader / val_loader: iterables yielding batches whose first
                element is the input tensor.
            num_epochs: number of training epochs.
            learning_rate: Adam learning rate.
            save_path: file path for checkpointing the best model (by
                validation loss). When None, no checkpoint is written.

        Returns:
            (train_losses, val_losses): per-epoch mean losses.
        """
        criterion = nn.MSELoss()
        optimizer = optim.Adam(self.parameters(), lr=learning_rate)
        train_losses = []
        val_losses = []
        best_loss = np.inf
        for epoch in range(num_epochs):
            # --- training phase ---
            self.train()  # enable training mode
            train_loss = 0
            for batch in train_loader:
                data = batch[0].to(self.device)
                optimizer.zero_grad()
                outputs = self(data)
                loss = criterion(outputs, data)
                loss.backward()
                optimizer.step()
                train_loss += loss.item()
            # --- validation phase ---
            self.eval()  # disable dropout/batch-norm style randomness
            with torch.no_grad():  # no gradients needed for validation
                val_loss = 0
                for batch in val_loader:
                    data = batch[0].to(self.device)
                    outputs = self(data)
                    loss = criterion(outputs, data)
                    val_loss += loss.item()
            train_loss /= len(train_loader)
            val_loss /= len(val_loader)
            train_losses.append(train_loss)
            val_losses.append(val_loss)
            if val_loss < best_loss:
                best_loss = val_loss
                # BUG FIX: the original called save_model(None) when save_path
                # was omitted, crashing torch.save on the first improvement.
                if save_path is not None:
                    self.save_model(save_path)  # checkpoint the best model
            if (epoch + 1) % 20 == 0:
                print(f'Epoch [{epoch+1}/{num_epochs}], Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}')
        return train_losses, val_losses

    def save_model(self, path):
        """Persist model weights plus constructor arguments to *path*."""
        torch.save({
            'model_state_dict': self.state_dict(),
            'input_dim': self.input_dim,
            'encoding_dim': self.encoding_dim
        }, path)

    def load_model(self, path):
        """Load weights from <path>/autoencoder.pth and return the scaler
        stored at <path>/scaler.joblib.

        NOTE(review): save_model writes to an explicit file path while
        load_model treats *path* as a directory — callers must keep these
        consistent; confirm against call sites.
        """
        # os.path.join instead of a hard-coded "//" separator (portable).
        model_path = os.path.join(path, "autoencoder.pth")
        scaler_path = os.path.join(path, "scaler.joblib")
        # map_location ensures checkpoints saved on GPU load on this device.
        checkpoint = torch.load(model_path, map_location=self.device)
        self.load_state_dict(checkpoint['model_state_dict'])
        self.input_dim = checkpoint['input_dim']
        self.encoding_dim = checkpoint['encoding_dim']
        scaler = joblib.load(scaler_path)
        return scaler