import numpy as np
import pandas as pd
from prettytable import PrettyTable

def getResults1():
    """Print a table of the result rows that maximize each metric.

    Loads the saved KDD99-30 pretrained sweep results, flattens the
    first 100 entries into an (1800, 4) matrix -- assumed columns
    (Acc, Pre, Rec, F1); TODO confirm against the producer script --
    and prints, via PrettyTable, the row achieving the highest value
    in each of the four columns.
    """
    results = np.load('Results_kdd99_30_pretrained.npy')
    # The original chained .transpose([0, 1, 2]) here, which is an
    # identity axis permutation (a no-op) -- dropped.
    results2 = results[0:100].reshape(1800, 4)
    ai = np.argmax(results2[:, 0])  # row index of best column 0 (Acc?)
    pi = np.argmax(results2[:, 1])  # row index of best column 1 (Pre?)
    ri = np.argmax(results2[:, 2])  # row index of best column 2 (Rec?)
    fi = np.argmax(results2[:, 3])  # row index of best column 3 (F1?)
    # NOTE(review): dtype.names is None for a plain (non-structured)
    # ndarray -- verify the saved array is structured, otherwise the
    # table is built without named headers.
    x = PrettyTable(results.dtype.names)
    x.add_row(results2[ai])
    x.add_row(results2[pi])
    x.add_row(results2[ri])
    x.add_row(results2[fi])
    print(x)

def getResults():
    """Print a table of the result rows that maximize each metric.

    Loads the inverted KDD99-30 results (epochs 21-22 file), flattens
    entries 21:23 into a (10, 5) matrix -- assumed columns
    (Acc, Pre, Rec, F1, FPR); TODO confirm against the producer --
    and prints, via PrettyTable, the row achieving the highest value
    in each of the first four columns.
    """
    results = np.load('Results_Invert_kdd99_30_pretrained_epochs21_22.npy')
    # The original chained .transpose([0, 1, 2]) here, which is an
    # identity axis permutation (a no-op) -- dropped.
    results2 = results[21:23].reshape(10, 5)
    ai = np.argmax(results2[:, 0])  # row index of best column 0 (Acc?)
    pi = np.argmax(results2[:, 1])  # row index of best column 1 (Pre?)
    ri = np.argmax(results2[:, 2])  # row index of best column 2 (Rec?)
    fi = np.argmax(results2[:, 3])  # row index of best column 3 (F1?)
    # NOTE(review): column 4 (FPR?) is never scanned here -- confirm
    # whether that is intentional.
    x = PrettyTable(results.dtype.names)
    x.add_row(results2[ai])
    x.add_row(results2[pi])
    x.add_row(results2[ri])
    x.add_row(results2[fi])
    print(x)



# if __name__ == "__main__":
#     print('Starting...')
#     # getResults()
#     print('Finishing...')

#     metricsDf = pd.DataFrame(columns=['tao', 'lam', 'rfunction', "Acc", "Pre", "Rec", "F1"])
#     k = 0
#     for i in range(1, 10):
#         tao = 0.1*i
#         for j in range(1, 10):
#             lam = 0.1*j

#             Accu1, Pre1, Rec1, F11 = 1, 1, 1, 1
#             Accu2, Pre2, Rec2, F12 = 2, 2, 2, 2
#             Accu3, Pre3, Rec3, F13 = 3, 3, 3, 3
#             Accu4, Pre4, Rec4, F14 = 4, 4, 4, 4
#             Accu5, Pre5, Rec5, F15 = 5, 5, 5, 5
#             Accu6, Pre6, Rec6, F16 = 6, 6, 6, 6
#             Accu7, Pre7, Rec7, F17 = 7, 7, 7, 7
#             Accu8, Pre8, Rec8, F18 = 8, 8, 8, 8

#             metricsDf.loc[8*k + 0] = [tao, lam, 1, Accu1, Pre1, Rec1, F11]
#             metricsDf.loc[8*k + 1] = [tao, lam, 2, Accu2, Pre2, Rec2, F12]
#             metricsDf.loc[8*k + 2] = [tao, lam, 3, Accu3, Pre3, Rec3, F13]
#             metricsDf.loc[8*k + 3] = [tao, lam, 4, Accu4, Pre4, Rec4, F14]
#             metricsDf.loc[8*k + 4] = [tao, lam, 5, Accu5, Pre5, Rec5, F15]
#             metricsDf.loc[8*k + 5] = [tao, lam, 6, Accu6, Pre6, Rec6, F16]
#             metricsDf.loc[8*k + 6] = [tao, lam, 7, Accu7, Pre7, Rec7, F17]
#             metricsDf.loc[8*k + 7] = [tao, lam, 8, Accu8, Pre8, Rec8, F18]
#             k = k + 1

#     metricsDf.to_pickle("autoencoder/paulo.pkl")

#     loadedMetricsDf = pd.read_pickle("autoencoder/paulo.pkl")


def fix_pre_rec_f1(df):
    """Rescale 'Pre' and 'Rec' to percentages, in place, for rows with
    ``rfunction <= 3``.

    BUG FIX: the original iterated with ``df.iterrows()`` and assigned
    into ``row``; ``iterrows()`` yields per-row copies, so those writes
    never propagated back to *df* and the function was a silent no-op.
    This version uses a vectorized ``.loc`` assignment, which writes
    directly into the DataFrame.

    :param df: pandas DataFrame with at least 'rfunction', 'Pre', 'Rec'
        columns; mutated in place. Returns None.
    """
    mask = df['rfunction'] <= 3
    df.loc[mask, ['Pre', 'Rec']] = df.loc[mask, ['Pre', 'Rec']] * 100

def getHighests(df):
    """Pick out the standout rows of a metrics DataFrame.

    Returns a 5-tuple of rows (as pandas Series):
    (best Acc, best Pre, best Rec, best F1, lowest FPR).
    """
    def row_with_max(col):
        # Row holding the maximum of the given column.
        return df.loc[df[col].idxmax()]

    lowest_fpr = df.loc[df['FPR'].idxmin()]
    return (
        row_with_max('Acc'),
        row_with_max('Pre'),
        row_with_max('Rec'),
        row_with_max('F1'),
        lowest_fpr,
    )

if __name__ == "__main__":
    print('Starting...')
    
    # Sweep results over (tao, lam, rfunction); see trailing column list.
    metricsDf = pd.read_pickle("autoencoder/results_ad_disc/Results_tao_lam_kdd99_30.pkl")      # ['tao', 'lam', 'rfunction', "Acc", "Pre", "Rec", "F1", "FPR"]

    # In-place fixup: rescale Pre/Rec to percentages where rfunction <= 3.
    fix_pre_rec_f1(metricsDf)

    # Rows with the best Acc/Pre/Rec/F1 and the lowest FPR.
    ha, hp, hr, hf, lfpr = getHighests(metricsDf)

    # NOTE(review): the five statements below are bare expressions whose
    # results are discarded -- they duplicate getHighests() above and look
    # like leftovers from an interactive/notebook session.
    metricsDf.loc[metricsDf['Acc'].idxmax()]
    metricsDf.loc[metricsDf['Pre'].idxmax()]
    metricsDf.loc[metricsDf['Rec'].idxmax()]
    metricsDf.loc[metricsDf['F1'].idxmax()]
    metricsDf.loc[metricsDf['FPR'].idxmin()]

    print('Finishing...')



    # Parameters and Results Paths -----------------------------------------------------------------------------------------
    path_autoencoder_training_parameters = "./Experiments_Autoencoder/Autoencoder_Loss_G/Training_Parameters/autoencoder_kdd99_30_"
    # path_autoencoder_training_parameters = "./Experiments_Autoencoder/Autoencoder_Loss_G_D/Training_Parameters/autoencoder_kdd99_30_"
    path_autoencoder_training_results = "./Experiments_Autoencoder/Autoencoder_Loss_G/Training_Results/"
    # path_autoencoder_training_results = "./Experiments_Autoencoder/Autoencoder_Loss_G_D/Training_Results/"
    #-----------------------------------------------------------------------------------------------------------------------
    # One loss value per training epoch -- presumably 1000 epochs, given
    # the r[999] lookup below; confirm against the training script.
    r = np.loadtxt(path_autoencoder_training_results + 'loss_per_epoch.txt')
    i = np.argmin(r)
    # NOTE(review): the two indexing expressions below (best-epoch loss
    # and final-epoch loss) are also discarded no-ops -- interactive
    # leftovers.
    r[i]
    r[999]


    # Without the discriminator (it was better before, with the old LSTM model in the RGAN's discriminator)
    # Last epoch:    epoch_autoencoder = 999 - loss = 1945.676169
    # Lowest epoch:  epoch_autoencoder = 890 - loss = 409.788901
    # Highest epoch: epoch_autoencoder = 250 - loss = 3225.807047

    