from model import MyModel
from dataset import TimeSeriesDataset
import pandas as pd
from sklearn.utils import resample
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import recall_score, f1_score, roc_auc_score
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import StepLR
import matplotlib.pyplot as plt
import wandb


def split_X_and_Y(samples):
    """Split a DataFrame into features (every column but the last) and labels (the last column)."""
    return samples.iloc[:, :-1], samples.iloc[:, -1]


# normalize input data
def normalize_data(X_train, X_valid, X_test):
    """Scale all three splits to the [-1, 1] range.

    The scaler is fitted on the training split only, then applied to
    every split, so no statistics leak from validation/test data.
    """
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler.fit(X_train)
    return (
        scaler.transform(X_train),
        scaler.transform(X_valid),
        scaler.transform(X_test),
    )


def train_and_test_model(args):
    """Train, validate and test the model on the CICDDoS2019 dataset.

    Pipeline: load the pre-processed CSV splits, upsample the benign
    (minority) class of the training split to match the DDoS class size,
    MinMax-scale all splits with statistics from the training data only,
    train for ``args.epochs`` epochs with a validation pass (and wandb
    logging) after every epoch, plot and save the training history, then
    evaluate once on the held-out test split.

    Args:
        args: namespace providing ``time_steps``, ``hidden_size``,
            ``num_layers``, ``LSTM_out_size``, ``epochs``, ``lr``,
            ``batch_size`` and ``use_cuda``.

    Side effects: reads the CSV files under ``cicddos2019/01-12/``, logs
    metrics to wandb, writes ``training_history.png`` and prints
    per-epoch and final test metrics.
    """
    input_channels = 1
    time_steps = args.time_steps
    hidden_size = args.hidden_size
    num_layers = args.num_layers
    LSTM_out_size = args.LSTM_out_size

    epochs = args.epochs
    learning_rate = args.lr
    batch_size = args.batch_size

    # UPSAMPLE OF NORMAL FLOWS
    train_set = pd.read_csv('cicddos2019/01-12/train_set_proc.csv', sep=',')

    # separate minority and majority classes
    # NOTE: the column name intentionally starts with a space (' Label'),
    # matching the raw CICDDoS2019 CSV headers.
    is_benign = train_set[' Label'] == 0

    normal = train_set[is_benign]
    ddos = train_set[~is_benign]

    # upsample minority class so both classes are balanced
    normal_upsampled = resample(normal,
                                replace=True,  # sample with replacement
                                n_samples=len(ddos),  # match number in majority class
                                random_state=27)  # reproducible results

    # combine majority and upsampled minority
    upsampled = pd.concat([normal_upsampled, ddos])
    train_X, train_Y = split_X_and_Y(upsampled)

    features_num = train_X.shape[1]

    # free the large intermediate frames before loading the other splits
    del train_set, normal_upsampled, ddos, upsampled, normal

    valid_set = pd.read_csv('cicddos2019/01-12/valid_set_proc.csv', sep=',')
    test_set = pd.read_csv('cicddos2019/01-12/test_set_proc.csv', sep=',')
    valid_X, valid_Y = split_X_and_Y(valid_set)
    test_X, test_Y = split_X_and_Y(test_set)

    # scaler is fitted on the training split only (no leakage)
    train_X, valid_X, test_X = normalize_data(train_X, valid_X, test_X)

    # Device configuration
    if args.use_cuda and torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    # Create dataloaders
    train_dataset = TimeSeriesDataset(torch.tensor(train_X, dtype=torch.float32), torch.FloatTensor(train_Y.values.tolist()), time_steps)
    valid_dataset = TimeSeriesDataset(torch.tensor(valid_X, dtype=torch.float32), torch.FloatTensor(valid_Y.values.tolist()), time_steps)
    test_dataset = TimeSeriesDataset(torch.tensor(test_X, dtype=torch.float32), torch.FloatTensor(test_Y.values.tolist()), time_steps)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    # Initialize model, loss function, and optimizer
    model = MyModel(input_channels, features_num, hidden_size, num_layers, time_steps, LSTM_out_size).to(device)
    criterion = nn.BCELoss()  # model outputs are treated as probabilities in (0, 1)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    scheduler = StepLR(optimizer, step_size=4, gamma=0.1)

    # per-epoch training history: 'loss' 'acc' 'recall' 'f1' 'roc'
    loss_history = []
    acc_history = []
    recall_history = []
    f1_history = []
    roc_auc_history = []

    for epoch in range(epochs):
        # BUG FIX: the validation pass at the end of each epoch switches the
        # model to eval mode, so training mode (dropout / batch-norm behavior)
        # must be restored here. Previously model.train() was called only once
        # before the loop, leaving the model in eval mode from epoch 2 onward.
        model.train()

        total_loss = 0
        total_correct = 0
        total_samples = 0
        y_true = []
        y_score = []
        for sequences, features, labels in train_dataloader:
            sequences, features, labels = sequences.to(device), features.to(device), labels.to(device)

            outputs = model(sequences, features)
            loss = criterion(outputs, labels.view(-1, 1))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # accumulate loss weighted by batch size so the epoch average is
            # exact even when the final batch is smaller than batch_size
            total_loss += loss.item() * labels.size(0)
            total_correct += ((outputs > 0.5).view(-1) == labels).sum().item()
            total_samples += labels.size(0)

            # save the true labels and predicted probabilities for ROC AUC score
            y_true.extend(labels.tolist())
            y_score.extend([output.item() for output in outputs])

        # compute the average loss and accuracy
        avg_loss = total_loss / total_samples
        avg_acc = total_correct / total_samples
        loss_history.append(avg_loss)
        acc_history.append(avg_acc)

        # compute recall, F1 score and ROC AUC score (0.5 decision threshold)
        recall = recall_score(y_true, [1 if score > 0.5 else 0 for score in y_score])
        f1 = f1_score(y_true, [1 if score > 0.5 else 0 for score in y_score])
        roc_auc = roc_auc_score(y_true, y_score)

        recall_history.append(recall)
        f1_history.append(f1)
        roc_auc_history.append(roc_auc)

        scheduler.step()

        print(f'Epoch [{epoch + 1}/{epochs}], Loss: {avg_loss}, Accuracy: {avg_acc}, Recall: {recall}, F1 Score: {f1}, ROC AUC Score: {roc_auc}')

        # Validate the model
        model.eval()

        total_valid_loss = 0
        total_valid_correct = 0
        total_valid_samples = 0
        y_valid_true = []
        y_valid_score = []
        with torch.no_grad():
            for sequences, features, labels in valid_dataloader:
                sequences, features, labels = sequences.to(device), features.to(device), labels.to(device)

                outputs = model(sequences, features)
                loss = criterion(outputs, labels.view(-1, 1))

                # compute the total loss and total correct predictions
                total_valid_loss += loss.item() * labels.size(0)
                total_valid_correct += ((outputs > 0.5).view(-1) == labels).sum().item()
                total_valid_samples += labels.size(0)

                # save the true labels and predicted probabilities for ROC AUC score
                y_valid_true.extend(labels.tolist())
                y_valid_score.extend([output.item() for output in outputs])

            # compute the average loss and accuracy
            avg_valid_loss = total_valid_loss / total_valid_samples
            avg_valid_acc = total_valid_correct / total_valid_samples

            # compute recall, F1 score and ROC AUC score
            valid_recall = recall_score(y_valid_true, [1 if score > 0.5 else 0 for score in y_valid_score])
            valid_f1 = f1_score(y_valid_true, [1 if score > 0.5 else 0 for score in y_valid_score])
            valid_roc_auc = roc_auc_score(y_valid_true, y_valid_score)

            print(
                f'Epoch: {epoch + 1}, Validation Loss: {avg_valid_loss}, Validation Accuracy: {avg_valid_acc}, Recall: {valid_recall}, F1 Score: {valid_f1}, ROC AUC Score: {valid_roc_auc}')

            # wandb logging
            wandb.log({
                'epoch': epoch,
                'train_loss': avg_loss,
                'train_acc': avg_acc,
                'train_recall': recall,
                'train_f1': f1,
                'train_roc_auc': roc_auc,
                'val_loss': avg_valid_loss,
                'val_acc': avg_valid_acc,
                'val_recall': valid_recall,
                'val_f1': valid_f1,
                'val_roc_auc': valid_roc_auc
            })

    # draw the loss, accuracy, recall, F1 score and ROC AUC score history
    plt.figure(figsize=(15, 10))
    plt.subplot(2, 3, 1)
    plt.plot(loss_history, label='Train Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()

    plt.subplot(2, 3, 2)
    plt.plot(acc_history, label='Train Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend()

    plt.subplot(2, 3, 3)
    plt.plot(recall_history, label='Train Recall')
    plt.xlabel('Epoch')
    plt.ylabel('Recall')
    plt.legend()

    plt.subplot(2, 3, 4)
    plt.plot(f1_history, label='Train F1 Score')
    plt.xlabel('Epoch')
    plt.ylabel('F1 Score')
    plt.legend()

    plt.subplot(2, 3, 5)
    plt.plot(roc_auc_history, label='Train ROC AUC Score')
    plt.xlabel('Epoch')
    plt.ylabel('ROC AUC Score')
    plt.legend()

    plt.savefig('training_history.png')
    plt.show()

    # Test the model (single pass over the held-out test split)
    model.eval()

    total_loss = 0
    total_correct = 0
    total_samples = 0
    y_true = []
    y_score = []

    with torch.no_grad():
        for sequences, features, labels in test_dataloader:
            sequences, features, labels = sequences.to(device), features.to(device), labels.to(device)

            outputs = model(sequences, features)
            loss = criterion(outputs, labels.view(-1, 1))

            # compute the total loss and total correct predictions
            total_loss += loss.item() * labels.size(0)
            total_correct += ((outputs > 0.5).view(-1) == labels).sum().item()
            total_samples += labels.size(0)

            # save the true labels and predicted probabilities for ROC AUC score
            y_true.extend(labels.tolist())
            y_score.extend([output.item() for output in outputs])

        # compute the average loss and accuracy
        avg_loss = total_loss / total_samples
        avg_acc = total_correct / total_samples

        # compute recall, F1 score and ROC AUC score
        recall = recall_score(y_true, [1 if score > 0.5 else 0 for score in y_score])
        f1 = f1_score(y_true, [1 if score > 0.5 else 0 for score in y_score])
        roc_auc = roc_auc_score(y_true, y_score)

        print(
            f'Test Loss: {avg_loss}, Test Accuracy: {avg_acc}, Recall: {recall}, F1 Score: {f1}, ROC AUC Score: {roc_auc}')