import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import pandas as pd

# Define the configurable ANN model
class ANNModel(nn.Module):
    """Feed-forward regressor: two ReLU hidden layers followed by a linear output.

    Layer widths are configurable; defaults give a 1 -> 16 -> 16 -> 1 network.
    """

    def __init__(self, input_dim=1, hidden_dim1=16, hidden_dim2=16, output_dim=1):
        super(ANNModel, self).__init__()
        # Attribute names fc1/fc2/fc3 are kept so existing state_dicts load.
        self.fc1 = nn.Linear(input_dim, hidden_dim1)
        self.fc2 = nn.Linear(hidden_dim1, hidden_dim2)
        self.fc3 = nn.Linear(hidden_dim2, output_dim)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Run the three-layer stack; no activation on the output layer."""
        hidden = self.relu(self.fc2(self.relu(self.fc1(x))))
        return self.fc3(hidden)


def train_model(model, criterion, optimizer, train_loader, val_loader, epochs=100, device="cpu"):
    """Train `model` for `epochs` epochs, validating after each one.

    Moves the model (and each batch) to `device`, prints per-epoch progress,
    and returns two lists of per-epoch average losses: (train_losses, val_losses).
    """
    model.to(device)
    train_losses, val_losses = [], []

    for epoch in range(epochs):
        # --- training pass ---
        model.train()
        running = 0.0
        for inputs, targets in train_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            batch_loss = criterion(model(inputs), targets)
            batch_loss.backward()
            optimizer.step()
            running += batch_loss.item()
        train_loss = running / len(train_loader)
        train_losses.append(train_loss)

        # --- validation pass (no gradients) ---
        model.eval()
        running = 0.0
        with torch.no_grad():
            for inputs, targets in val_loader:
                inputs, targets = inputs.to(device), targets.to(device)
                running += criterion(model(inputs), targets).item()
        val_loss = running / len(val_loader)
        val_losses.append(val_loss)

        print(f"Epoch [{epoch+1}/{epochs}], Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}")

    return train_losses, val_losses


if __name__ == "__main__":
    # Load the pre-split reduced (POD) data produced by an upstream step.
    data_train = pd.read_csv('./output/reduced_data_train.csv')
    data_valid = pd.read_csv('./output/reduced_data_test.csv')
    x_train = data_train['Force'].values.reshape(-1, 1)
    x_val = data_valid['Force'].values.reshape(-1, 1)
    y_train = data_train['Component_1'].values.reshape(-1, 1)
    y_val = data_valid['Component_1'].values.reshape(-1, 1)

    # Keep the full, unscaled data around for plotting the true curve later.
    x = np.concatenate((x_train, x_val), axis=0)
    y = np.concatenate((y_train, y_val), axis=0)

    # Normalize to [0, 1]. FIX: fit the scalers on the TRAINING split only.
    # The previous code fit on train+val concatenated, which leaks validation
    # statistics (min/max) into the training-time transform.
    scaler_x = MinMaxScaler()
    scaler_y = MinMaxScaler()
    x_train = scaler_x.fit_transform(x_train)
    y_train = scaler_y.fit_transform(y_train)
    x_val = scaler_x.transform(x_val)
    y_val = scaler_y.transform(y_val)

    # Convert data to PyTorch tensors (float32 to match nn.Linear defaults).
    x_train_tensor = torch.tensor(x_train, dtype=torch.float32)
    y_train_tensor = torch.tensor(y_train, dtype=torch.float32)
    x_val_tensor = torch.tensor(x_val, dtype=torch.float32)
    y_val_tensor = torch.tensor(y_val, dtype=torch.float32)

    train_dataset = torch.utils.data.TensorDataset(x_train_tensor, y_train_tensor)
    val_dataset = torch.utils.data.TensorDataset(x_val_tensor, y_val_tensor)

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=32, shuffle=False)

    # Initialize the model, loss function, and optimizer.
    model = ANNModel()
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.01)

    # Prefer CUDA when available; train_model moves model/batches over.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Train the model.
    epochs = 100
    train_losses, val_losses = train_model(model, criterion, optimizer, train_loader, val_loader, epochs, device=device)

    # Plot learning curve.
    plt.figure(figsize=(10, 6))
    plt.plot(train_losses, label='Train Loss')
    plt.plot(val_losses, label='Validation Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.title('Learning Curve')
    plt.legend()
    plt.show()

    # Evaluate on the validation inputs and map predictions back to the
    # original y scale for plotting against the true data.
    model.eval()
    with torch.no_grad():
        x_full_tensor = torch.tensor(x_val, dtype=torch.float32).to(device)
        y_pred_scaled = model(x_full_tensor).cpu().numpy()
        y_pred = scaler_y.inverse_transform(y_pred_scaled)

    plt.figure(figsize=(10, 6))
    plt.scatter(x, y, label='True Data', alpha=0.6)
    plt.scatter(scaler_x.inverse_transform(x_val), y_pred, color='red', label='Predicted Data')
    plt.xlabel('Force (x)')
    plt.ylabel('Component_1 (y)')
    plt.title('Prediction Curve')
    plt.legend()
    plt.show()
