from model import MyModel
import pandas as pd
from sklearn.utils import resample
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import recall_score, f1_score, roc_auc_score
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torch.optim.lr_scheduler import StepLR
import matplotlib.pyplot as plt


def split_X_and_Y(samples):
    """Split a DataFrame into features (every column but the last) and labels (last column)."""
    features = samples.iloc[:, :-1]
    labels = samples.iloc[:, -1]
    return features, labels


def normalize_data(X_train, X_test):
    """Scale both sets to [-1, 1] with a MinMaxScaler fitted on the train set only.

    Fitting on X_train alone avoids leaking test-set statistics into the
    scaling. Returns the transformed (X_train, X_test) as numpy arrays.
    """
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler.fit(X_train)
    return scaler.transform(X_train), scaler.transform(X_test)


def train_model(model, train_loader, criterion, optimizer, device, num_epochs=20):
    """Run a plain supervised training loop, printing the mean batch loss per epoch.

    NOTE(review): a second `train_model` defined later in this file shadows
    this one at import time — confirm which definition callers actually need.
    """
    model.train()
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for batch_inputs, batch_labels in train_loader:
            batch_inputs = batch_inputs.to(device)
            batch_labels = batch_labels.to(device)
            optimizer.zero_grad()
            predictions = model(batch_inputs)
            batch_loss = criterion(predictions, batch_labels)
            batch_loss.backward()
            optimizer.step()
            epoch_loss += batch_loss.item()
        mean_loss = epoch_loss / len(train_loader)
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {mean_loss:.4f}')


class TimeSeriesDataset(Dataset):
    """Dataset of (window, next-step feature row, next-step label) triples.

    Item i is the `time_steps`-long feature window starting at row i together
    with the feature row and label at position i + time_steps.
    """

    def __init__(self, features, labels, time_steps):
        self.time_sequences, self.features, self.labels = self.create_sequences(
            features, labels, time_steps)

    def create_sequences(self, features_init, labels_init, time_steps):
        """Stack sliding windows, the feature rows after them, and their labels."""
        num_windows = len(labels_init) - time_steps
        windows = [features_init[start:start + time_steps] for start in range(num_windows)]
        next_features = [features_init[start + time_steps] for start in range(num_windows)]
        next_labels = [labels_init[start + time_steps] for start in range(num_windows)]
        return (torch.stack(windows, dim=0),
                torch.stack(next_features, dim=0),
                torch.stack(next_labels, dim=0))

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return self.time_sequences[idx], self.features[idx], self.labels[idx]


def train_model(args, device):
    """Load the CICDDoS2019 split, balance classes, and train MyModel with BCE loss.

    The benign (minority) class is upsampled to match the DDoS class, features
    are scaled to [-1, 1] (scaler fitted on train only), windowed into time
    sequences, and per-epoch metric curves are saved to 'training_history.png'.

    NOTE(review): this definition shadows the earlier generic `train_model`
    in this file — confirm which one callers need.

    Args:
        args: currently unused; kept for interface compatibility.
        device: torch device the model and batches are moved to.

    Returns:
        The trained model.
    """
    # UPSAMPLE OF NORMAL FLOWS
    train_set = pd.read_csv('cicddos2019/01-12/train_set_proc.csv', sep=',')
    test_set = pd.read_csv('cicddos2019/01-12/test_set_proc.csv', sep=',')

    # separate minority and majority classes (the column name has a leading space)
    is_benign = train_set[' Label'] == 0

    normal = train_set[is_benign]
    ddos = train_set[~is_benign]

    # upsample minority
    normal_upsampled = resample(normal,
                                replace=True,  # sample with replacement
                                n_samples=len(ddos),  # match number in majority class
                                random_state=27)  # reproducible results

    # combine majority and upsampled minority
    upsampled = pd.concat([normal_upsampled, ddos])
    train_X, train_Y = split_X_and_Y(upsampled)
    # FIX: test_X/test_Y were referenced below but never defined (NameError)
    test_X, test_Y = split_X_and_Y(test_set)

    # FIX: scale features before tensor conversion; normalize_data fits on the
    # train set only and returns numpy arrays, which torch.tensor() accepts
    # (torch.tensor() on a raw DataFrame would fail)
    train_X, test_X = normalize_data(train_X, test_X)

    input_size = (train_X.shape[1], 1)

    del train_set, test_set, normal_upsampled, ddos, upsampled, normal

    epochs = 10
    batch_size = 256
    learning_rate = 0.001
    features_num = input_size[0]
    input_channels = 1
    time_steps = 3
    hidden_size = 32
    num_layers = 2
    LSTM_out_size = 2

    train_dataset = TimeSeriesDataset(torch.tensor(train_X, dtype=torch.float32),
                                      torch.FloatTensor(train_Y.values.tolist()), time_steps)
    test_dataset = TimeSeriesDataset(torch.tensor(test_X, dtype=torch.float32),
                                     torch.FloatTensor(test_Y.values.tolist()), time_steps)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    # NOTE(review): test_dataloader is built but unused in this function —
    # presumably meant to be handed to test_model by the caller; confirm.
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    # Initialize model, loss function, optimizer and LR schedule
    model = MyModel(input_channels, features_num, hidden_size, num_layers, time_steps, LSTM_out_size).to(device)
    criterion = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    scheduler = StepLR(optimizer, step_size=4, gamma=0.1)

    # Train the model
    model.train()

    # per-epoch metric histories for plotting
    loss_history = []
    acc_history = []
    recall_history = []
    f1_history = []
    roc_auc_history = []

    for epoch in range(epochs):
        total_loss = 0.0
        total_correct = 0
        total_samples = 0
        y_true = []
        y_score = []
        for sequences, features, labels in train_dataloader:
            sequences, features, labels = sequences.to(device), features.to(device), labels.to(device)

            outputs = model(sequences, features)
            loss = criterion(outputs, labels.view(-1, 1))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # accumulate loss weighted by batch size; hard-threshold at 0.5 for accuracy
            total_loss += loss.item() * labels.size(0)
            total_correct += ((outputs > 0.5).view(-1) == labels).sum().item()
            total_samples += labels.size(0)

            # keep true labels and predicted probabilities for the ranking metrics
            y_true.extend(labels.tolist())
            y_score.extend(output.item() for output in outputs)

        # FIX: advance the LR schedule once per epoch (the scheduler was
        # created but never stepped, so the decay never took effect)
        scheduler.step()

        # compute the average loss and accuracy
        avg_loss = total_loss / total_samples
        avg_acc = total_correct / total_samples
        loss_history.append(avg_loss)
        acc_history.append(avg_acc)

        # compute recall, F1 score and ROC AUC score
        y_pred = [1 if score > 0.5 else 0 for score in y_score]
        recall = recall_score(y_true, y_pred)
        f1 = f1_score(y_true, y_pred)
        roc_auc = roc_auc_score(y_true, y_score)

        recall_history.append(recall)
        f1_history.append(f1)
        roc_auc_history.append(roc_auc)

        print(
            f'Epoch [{epoch + 1}/{epochs}], Loss: {avg_loss}, Accuracy: {avg_acc}, Recall: {recall}, F1 Score: {f1}, ROC AUC Score: {roc_auc}')

    _plot_training_history(loss_history, acc_history, recall_history, f1_history, roc_auc_history)

    return model


def _plot_training_history(loss_history, acc_history, recall_history, f1_history, roc_auc_history):
    """Plot per-epoch metric curves, save them to 'training_history.png', and show."""
    curves = [(loss_history, 'Train Loss', 'Loss'),
              (acc_history, 'Train Accuracy', 'Accuracy'),
              (recall_history, 'Train Recall', 'Recall'),
              (f1_history, 'Train F1 Score', 'F1 Score'),
              (roc_auc_history, 'Train ROC AUC Score', 'ROC AUC Score')]
    plt.figure(figsize=(15, 10))
    for position, (history, label, ylabel) in enumerate(curves, start=1):
        plt.subplot(2, 3, position)
        plt.plot(history, label=label)
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        plt.legend()
    plt.savefig('training_history.png')
    plt.show()


def test_model(model, test_dataloader, criterion, device):
    """Evaluate `model` on `test_dataloader`, print the metrics, and return them.

    FIX: removed dead code that re-read the test CSV and referenced undefined
    names (`train_X`, `input_size`) — it raised NameError, and the dataloader
    parameter already carries the prepared test data.

    Args:
        model: trained network whose forward takes (sequences, features).
        test_dataloader: yields (sequences, features, labels) batches.
        criterion: loss function applied to (outputs, labels).
        device: torch device evaluation runs on.

    Returns:
        dict with keys 'loss', 'accuracy', 'recall', 'f1', 'roc_auc'.

    Raises:
        ValueError: if the dataloader yields no samples.
    """
    model.eval()

    total_loss = 0.0
    total_correct = 0
    total_samples = 0
    y_true = []
    y_score = []

    with torch.no_grad():
        for sequences, features, labels in test_dataloader:
            sequences, features, labels = sequences.to(device), features.to(device), labels.to(device)

            outputs = model(sequences, features)
            loss = criterion(outputs, labels.view(-1, 1))

            # accumulate loss weighted by batch size; hard-threshold at 0.5 for accuracy
            total_loss += loss.item() * labels.size(0)
            total_correct += ((outputs > 0.5).view(-1) == labels).sum().item()
            total_samples += labels.size(0)

            # keep true labels and predicted probabilities for the ranking metrics
            y_true.extend(labels.tolist())
            y_score.extend(output.item() for output in outputs)

    if total_samples == 0:
        raise ValueError('test_dataloader yielded no samples')

    # compute the average loss and accuracy
    avg_loss = total_loss / total_samples
    avg_acc = total_correct / total_samples

    # compute recall, F1 score and ROC AUC score
    y_pred = [1 if score > 0.5 else 0 for score in y_score]
    recall = recall_score(y_true, y_pred)
    f1 = f1_score(y_true, y_pred)
    roc_auc = roc_auc_score(y_true, y_score)

    print(
        f'Test Loss: {avg_loss}, Test Accuracy: {avg_acc}, Recall: {recall}, F1 Score: {f1}, ROC AUC Score: {roc_auc}')

    return {'loss': avg_loss, 'accuracy': avg_acc, 'recall': recall,
            'f1': f1, 'roc_auc': roc_auc}