import numpy as np
import torch.utils.data as data
from matplotlib import pyplot as plt
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch
import torch.nn as nn
import torch.nn.functional as F

from predicte.dataset import create_train_data


def train_model_no_attention(X_train, y_train, X_val, y_val, model_path=None,
                             epochs=100, save_path='model_state_dict.pth'):
    """Train a Conv1d + LSTM regression model (no attention) and return it.

    Args:
        X_train: training inputs, shape (N, seq_len, 9) — the model permutes
            them to (N, 9, seq_len) before the Conv1d layers. TODO confirm
            the feature dimension is 9 against ``create_train_data``.
        y_train: training targets, shape (N,).
        X_val: validation inputs, same layout as X_train.
        y_val: validation targets, shape (M,); kept on CPU for the RMSE check.
        model_path: optional path of a saved state_dict to resume from.
        epochs: maximum number of training epochs (default 100, as before).
        save_path: where the final state_dict is written (default unchanged).

    Returns:
        (model, train_rmse_per_epoch, val_rmse_per_epoch)
    """

    class CustomEarlyStopping:
        """Stop training when validation loss fails to improve for `patience` epochs."""

        def __init__(self, patience=10, delta=0, verbose=False):
            self.patience = patience      # epochs to wait without improvement
            self.delta = delta            # minimum change that counts as improvement
            self.verbose = verbose
            self.counter = 0              # consecutive non-improving epochs
            self.best_score = None
            self.early_stop = False
            self.best_epoch = 0           # epoch of the best score seen so far

        def __call__(self, val_loss, epoch):
            # Negate so that a *higher* score is better (loss goes down).
            score = -val_loss

            if self.best_score is None:
                self.best_score = score
                self.best_epoch = epoch
            elif score < self.best_score + self.delta:
                self.counter += 1
                if self.verbose:
                    print(f'EarlyStopping counter: {self.counter} out of {self.patience}, score: {self.best_score}')
                if self.counter >= self.patience:
                    self.early_stop = True
            else:
                self.best_score = score
                self.best_epoch = epoch
                self.counter = 0

    class Net(nn.Module):
        """Conv1d feature extractor followed by a stacked (uni + bi) LSTM regressor."""

        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv1d(in_channels=9, out_channels=64, kernel_size=3, padding=1)
            self.conv2 = nn.Conv1d(in_channels=64, out_channels=128, kernel_size=3, padding=1)
            # NOTE: applied after permuting to (batch, seq, 128), so this pools
            # over the *feature* dimension, halving 128 -> 64 channels.
            self.maxpool = nn.MaxPool1d(2)
            self.lstm1 = nn.LSTM(input_size=64, hidden_size=128, batch_first=True)
            self.dropout1 = nn.Dropout(0.2)
            self.bidirectional = nn.LSTM(128, 128, bidirectional=True, batch_first=True)
            self.dropout2 = nn.Dropout(0.2)
            self.dense1 = nn.Linear(128 * 2, 64)   # 2x for the bidirectional output
            self.dense2 = nn.Linear(64, 8)
            self.dense3 = nn.Linear(8, 1)

        def forward(self, x):
            # x: (batch, 9, seq_len)
            x = F.relu(self.conv1(x))
            x = F.relu(self.conv2(x))
            x = self.maxpool(x.permute(0, 2, 1))   # -> (batch, seq_len, 64)
            x, _ = self.lstm1(x)
            x = self.dropout1(x)
            x, _ = self.bidirectional(x)
            x = x[:, -1, :]                        # last time step only
            x = self.dropout2(x)
            # torch.sigmoid: F.sigmoid is deprecated and removed in newer torch.
            x = torch.sigmoid(self.dense1(x))
            x = self.dense2(x)
            x = self.dense3(x)
            return x

    def calculate_rmse(model, X, y, criterion):
        """Return RMSE of `model` on (X, y) as a float; X is (N, seq_len, 9)."""
        with torch.no_grad():
            y_pred = model(X.permute(0, 2, 1))
            # Compare on CPU: y (validation targets) was never moved to device.
            rmse = torch.sqrt(criterion(y_pred.cpu(), y.unsqueeze(1)))
        return rmse.item()

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Initialize the model; optionally resume from a checkpoint.
    Torchmodel = Net()
    if model_path:
        # map_location lets a GPU-trained checkpoint load on a CPU-only machine.
        Torchmodel.load_state_dict(torch.load(model_path, map_location=device))
        print("Loaded model from", model_path)

    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(Torchmodel.parameters(), lr=1e-3, weight_decay=1e-5)
    # Halve the LR after 5 epochs without validation improvement.
    scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5)

    # .to(device) moves parameters in place, so the optimizer stays valid.
    Torchmodel = Torchmodel.to(device)
    criterion = criterion.to(device)

    loader = data.DataLoader(data.TensorDataset(X_train, y_train),
                             batch_size=8, shuffle=True)

    X_val = X_val.to(device)

    early_stopping = CustomEarlyStopping(patience=10, verbose=True)

    train_losses = []   # per-epoch train RMSE
    val_losses = []     # per-epoch validation RMSE

    for epoch in range(epochs):
        Torchmodel.train()
        train_loss = 0.0
        for X_batch, y_batch in loader:
            X_batch = X_batch.to(device)
            y_batch = y_batch.to(device)
            y_pred = Torchmodel(X_batch.permute(0, 2, 1))
            loss = criterion(y_pred, y_batch.unsqueeze(1))
            optimizer.zero_grad(set_to_none=True)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        train_loss /= len(loader)
        train_losses.append(np.sqrt(train_loss))

        Torchmodel.eval()
        val_rmse = calculate_rmse(Torchmodel, X_val, y_val, criterion)
        val_losses.append(val_rmse)

        scheduler.step(val_rmse)
        early_stopping(val_rmse, epoch)

        if early_stopping.early_stop:
            print("Early stopping")
            break

        if epoch % 10 == 0:
            # Original used the invalid escape '\ ' — '\t' was clearly intended.
            print('*' * 10, 'Epoch: ', epoch, '\ttrain RMSE: ', np.sqrt(train_loss), '\tval RMSE', val_rmse)

    print(Torchmodel)
    # NOTE(review): this saves the *final* weights; early stopping does not
    # restore the best epoch (early_stopping.best_epoch) — confirm intended.
    torch.save(Torchmodel.state_dict(), save_path)

    return Torchmodel, train_losses, val_losses


# Data preparation: build train/val/test splits via the project-local helper.
# NOTE(review): the package name 'predicte' (imported above) looks like a typo
# for 'predict' — confirm against the project layout.
X_train, y_train, X_val, y_val, X_test, y_test, scaler = create_train_data(lookback=3)
print("X_train shape:", X_train.shape)

# Resume training from the previously saved checkpoint and keep training.
# NOTE(review): this fails with FileNotFoundError on a fresh run where
# 'model_state_dict.pth' does not exist yet — confirm that is acceptable.
Torchmodel, train_losses, val_losses = train_model_no_attention(X_train, y_train, X_val, y_val, model_path='model_state_dict.pth')

# Visualize per-epoch train/validation RMSE curves.
plt.figure(figsize=(10, 5))
plt.plot(train_losses, label='Train Loss')
plt.plot(val_losses, label='Val Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()