import argparse
import os

import pandas

# Keep these imports: globals() lookups in call_model_train_test_and_to_csv
# resolve the train_and_test_*_model functions by name.
from autoencoder import train_and_test_autoencoder_model
from gdn_pre import gdn_pre
from lstmvae import train_and_test_lstm_vae_model
from dagmm import train_and_test_dagmm_model
from gdn import train_and_test_gdn_model
from usad import train_and_test_usad_model
from omnianomaly import train_and_test_omni_anomaly_model
from mscred import train_and_test_mscred_model
from mtad_gat import train_and_test_mtad_gat_model
from model_utils import create_all_dataloader


def call_model_train_test_and_to_csv(model_name, dataloader, *,
                                     epochs=None, data_dir=None,
                                     csv_store_path=None):
    """Train/test one model on one dataset, dump per-sample results to CSV.

    The train-and-test function is resolved dynamically: the module-level
    imports bind every ``train_and_test_<name>_model`` into globals(), and
    ``model_name`` selects one of them.

    Args:
        model_name: suffix of the ``train_and_test_*_model`` function to run
            (e.g. ``'usad'``, ``'gdn'``).
        dataloader: dict with keys ``'normal'`` and ``'attack'`` (dataloaders)
            and ``'dataset_name'`` (str; may contain ``'/'``).
        epochs: number of training epochs; defaults to the CLI ``args.epoch``.
        data_dir: data directory; defaults to the CLI ``args.data_dir``.
        csv_store_path: output root for CSV files; defaults to the CLI
            ``args.csv_store_path``.

    Returns:
        dict with the model name, dataset name and the confusion-matrix
        metrics — one row of the final results table.

    Raises:
        KeyError: if no ``train_and_test_{model_name}_model`` exists.
    """
    # Keyword-only overrides keep the function usable (and testable) without
    # the module-level `args` object; the old 2-positional call still works.
    if epochs is None:
        epochs = args.epoch
    if data_dir is None:
        data_dir = args.data_dir
    if csv_store_path is None:
        csv_store_path = args.csv_store_path

    model_func = f'train_and_test_{model_name}_model'
    # Reflect on globals() so adding a model only needs a new import above.
    confusion_matrix = globals()[model_func](normal_dataloader=dataloader['normal'],
                                             attack_dataloader=dataloader['attack'],
                                             epochs=epochs,
                                             continue_train=False,
                                             dataset_name=dataloader['dataset_name'],
                                             data_dir=data_dir)

    # Persist y_pred / y_true. A '/' in the dataset name stays a directory
    # level in the path but is flattened to '_' inside the file name.
    dataset_name = dataloader['dataset_name']
    csv_path = os.path.join(csv_store_path, dataset_name,
                            f'{model_name}_{dataset_name.replace("/", "_")}.csv')
    confusion_matrix.to_dataframe().to_csv(csv_path, index=False)

    return {
        'model name': model_name,
        'dataset': dataset_name,
        'f1 score': confusion_matrix.f1_score,
        'tsad score': confusion_matrix.tsad_score,
        'precision': confusion_matrix.precision,
        'recall': confusion_matrix.recall,
        'detection time': confusion_matrix.time
    }


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='baselines test')
    parser.add_argument('--data_dir', type=str, default='./data')
    parser.add_argument('--batch_size', type=int, default=128)
    # required=True: omitting --models previously crashed later with an
    # opaque "'NoneType' is not iterable"; fail fast with a clear message.
    parser.add_argument('--models', nargs='+', required=True)
    parser.add_argument('--csv_store_path', type=str, default='./csv')
    parser.add_argument('--epoch', type=int, default=50)
    args = parser.parse_args()

    # GDN needs its own preprocessing pass over the raw data first.
    gdn_pre(data_dir=args.data_dir)

    # Create dataloaders for every dataset stored in the data directory.
    all_dataloader = create_all_dataloader(
        data_directory=args.data_dir, batch_size=args.batch_size)

    results = []
    for dl in all_dataloader:
        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(os.path.join(args.csv_store_path, dl['dataset_name']),
                    exist_ok=True)

        for model in args.models:
            results.append(call_model_train_test_and_to_csv(model, dl))

    # Summary table for all (model, dataset) runs. to_csv accepts a path
    # directly, so no manual open() is needed. NOTE: the file is named
    # result.txt for historical reasons, but the content is CSV.
    pandas.DataFrame(results).to_csv(
        os.path.join(args.csv_store_path, 'result.txt'),
        index=False, float_format='%.2f')