import time

import numpy as np
import torch
import torch.utils.data as tud
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score, confusion_matrix
from sklearn.metrics import recall_score
from dataset import CELESTIAL_BODY as CB

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# the data size of one file in the training dataset
TRAIN_GROUP_SIZE = 20000

# the data size of two files in the testing dataset
TEST_GROUP_SIZE = 40000


def model_train(model, dl, cri, opt, scheduler, num_epochs=10, data_group=0) -> torch.nn.Module:
    """
    Train the given model for ``num_epochs`` epochs over the dataloader.

    :param model: the model to be trained
    :param dl: torch.utils.data.DataLoader yielding (inputs, labels) batches
    :param cri: criterion (loss function)
    :param opt: optimizer defining the gradient-descent algorithm
    :param scheduler: the learning-rate scheduler
    :param num_epochs: number of epochs to train the model
    :param data_group: index of the dataset file in use (there are 6 files);
        only used for progress printing
    :return: the trained model
    """
    # track wall-clock training time for the progress report below
    start_time = time.time()

    for epoch in range(num_epochs):
        model.train()

        running_loss = 0.0
        running_corrects = 0

        for inputs, labels in dl:
            inputs = inputs.to(device)
            labels = labels.to(device)

            # reset accumulated gradients before this batch's backward pass
            opt.zero_grad()
            # BUG FIX: use set_grad_enabled as a context manager instead of a
            # bare call, so grad mode is scoped to this block rather than
            # being flipped globally for the rest of the process
            with torch.set_grad_enabled(True):
                outputs = model(inputs)
                _, pres = torch.max(outputs, 1)

                # compute loss and backpropagate through the model
                loss = cri(outputs, labels)
                loss.backward()
            opt.step()

            # loss.item() is the batch mean; re-weight by batch size so the
            # epoch average below is correct even for a ragged last batch
            running_loss += loss.item() * inputs.size(0)
            # BUG FIX: .item() so running_corrects stays a plain int, not a
            # 0-d tensor (makes the accuracy below a real float)
            running_corrects += torch.sum(pres == labels.data).item()

        # BUG FIX: step the scheduler AFTER the epoch's optimizer updates,
        # not before them -- required since PyTorch 1.1; stepping first
        # skips the initial learning-rate value entirely
        scheduler.step()

        epoch_loss = running_loss / TRAIN_GROUP_SIZE
        epoch_accuracy = running_corrects / TRAIN_GROUP_SIZE
        print('data group ' + str(data_group + 1) + ':')
        print('model loss: %f\n'
              'model accuracy: %f' %
              (epoch_loss,
               epoch_accuracy))
        time_elapsed = time.time() - start_time
        print(('this epoch costs time about %dm %ds\n' % (time_elapsed // 60, time_elapsed % 60)))
    return model


def model_test(model,
               cri,
               dl: torch.utils.data.DataLoader,
               save=True) -> None:
    """
    Evaluate the model on the test dataloader, print metrics, optionally save.

    Prints the confusion matrix, per-class and macro-averaged f1/precision/
    recall, and the overall test loss and accuracy.

    :param model: model to be tested
    :param cri: the criterion (loss function) used for the test loss
    :param dl: the dataloader yielding (inputs, labels) test batches
    :param save: if True, save the model weights to "model.pth"
    :return: None
    """
    print('Start testing:')
    # switch layers such as dropout/batchnorm to evaluation behaviour
    model.eval()

    running_loss = 0.0
    running_corrects = 0
    prediction_result = []
    true_result = []

    # BUG FIX: use the no_grad context manager instead of calling
    # torch.set_grad_enabled(False) inside the loop -- the bare call
    # disables gradient computation GLOBALLY and never restores it, which
    # silently breaks any training performed after this function returns
    with torch.no_grad():
        for inputs, labels in dl:
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, prediction = torch.max(outputs, 1)
            loss = cri(outputs, labels)

            # accumulate the total test loss (loss.item() is the batch mean,
            # so re-weight by batch size) and collect predictions / targets
            running_loss += loss.item() * inputs.size(0)
            # BUG FIX: .item() keeps running_corrects a plain int rather
            # than a 0-d tensor
            running_corrects += torch.sum(prediction == labels.data).item()
            prediction_result = np.hstack((prediction_result, prediction.cpu().int()))
            true_result = np.hstack((true_result, labels.data.cpu().int()))

    # per-class f1_score, precision and recall (average=None -> one value
    # per class, indexed by the CB label mapping)
    f1_scores = f1_score(true_result, prediction_result, average=None)
    precisions = precision_score(true_result, prediction_result, average=None)
    recalls = recall_score(true_result, prediction_result, average=None)
    matrix = confusion_matrix(true_result, prediction_result)
    # NOTE(review): the row/column labels below assume CB maps
    # qso -> 0, star -> 1, galaxy -> 2 -- confirm against dataset.py
    cm = '------------------------------\n' + \
         '       |  qso  |  star | galaxy |\n' + \
         '------------------------------\n' + \
         'qso    |  {}  |  {} | {} |\n'.format(matrix[0][0], matrix[0][1], matrix[0][2],) + \
         '------------------------------\n' + \
         'star   | {} | {} | {} |\n'.format(matrix[1][0], matrix[1][1], matrix[1][2],) + \
         '------------------------------\n' + \
         'galaxy | {} | {} | {} |\n'.format(matrix[2][0], matrix[2][1], matrix[2][2],) + \
         '----------------------------\n'
    table = '---------------------------------------------------------------\n' + \
            '         | qso             | star            | galaxy          |\n' + \
            '---------------------------------------------------------------|\n' + \
            'f1_score | {} | {} | {} |\n'.format(
                f1_scores[CB['qso']], f1_scores[CB['star']], f1_scores[CB['galaxy']]) + \
            '---------------------------------------------------------------|\n' + \
            'precision| {} | {} | {} |\n'.format(
                precisions[CB['qso']], precisions[CB['star']], precisions[CB['galaxy']],) + \
            '---------------------------------------------------------------|\n' + \
            'recall   | {} | {} | {} |\n'.format(
                recalls[CB['qso']], recalls[CB['star']], recalls[CB['galaxy']]) + \
            '----------------------------------------------------------------'
    print("confusion matrix:\n", cm)
    print("average f1_score: %.4f" % (f1_score(true_result, prediction_result, average="macro")))
    print(table)
    loss = running_loss / TEST_GROUP_SIZE
    accuracy = running_corrects / TEST_GROUP_SIZE
    print('test Loss: %f, test accuracy: %f' % (loss, accuracy))

    # persist only the weights (state_dict), not the whole pickled module
    if save:
        torch.save(model.state_dict(), "model.pth")
