import numpy as np
import time
import pandas as pd
import model
import helper
import train
import sys
import pandas
import openpyxl
import os
import argparse
import scipy.stats as stats

# coding=UTF-8
# NOTE(review): a coding declaration only takes effect on line 1 or 2 of a
# file (PEP 263); here it is purely informational.

# Make the sibling DeepSequence checkout importable.
# NOTE(review): this insert runs *after* `import model`, `import helper` and
# `import train` above, so those must already resolve from elsewhere on the
# path — confirm whether this line is still needed or should move up.
sys.path.insert(0, "../../DeepSequence/")


def calculate_spearman_correlation(X, Y):
    """Return the Spearman rank-correlation coefficient between X and Y."""
    correlation, _ = stats.spearmanr(X, Y)
    return correlation


def calculate_spearman_correlation_p(X, Y):
    """Return the p-value of the Spearman rank correlation between X and Y."""
    _, p_value = stats.spearmanr(X, Y)
    return p_value


# Full benchmark list, kept for reference:
# proteus_names=['WW','UBC9_HUMAN','RASH_HUMAN','TIM_SULSO','P84126_THETH','MTH3_HAEAESTABILIZED','MK01_HUMAN','KKA2_KLEPN','BLAT_ECOLX','BG_STRSQ','B3VI55_LIPST','AMIE_PSEAE','GFP']

#proteus_names = ['UBC9_HUMAN', 'RASH_HUMAN', 'TIM_SULSO', 'P84126_THETH', 'MTH3_HAEAESTABILIZED']
# Protein datasets processed in this run; each name must be a subdirectory of
# the --dataset-input path containing alignment + experiments/ + predictions/.
proteus_names = ['KKA2_KLEPN','BLAT_ECOLX','BG_STRSQ','B3VI55_LIPST','AMIE_PSEAE','GFP']
# Hyperparameters forwarded to model.VariationalAutoencoder below.
# NOTE(review): these look like the published DeepSequence defaults — confirm
# against the DeepSequence repository before changing.
model_params = {
    "batch_size": 100,
    "encode_dim_zero": 1500,      # first encoder hidden layer width
    "encode_dim_one": 1500,       # second encoder hidden layer width
    "decode_dim_zero": 100,       # first decoder hidden layer width
    "decode_dim_one": 500,        # second decoder hidden layer width
    "n_patterns": 4,
    "n_latent": 30,               # latent dimensionality
    "logit_p": 0.001,
    "sparsity": "logit",          # sparsity prior type
    "encode_nonlin": "relu",
    "decode_nonlin": "relu",
    "final_decode_nonlin": "sigmoid",
    "output_bias": True,
    "final_pwm_scale": True,
    "conv_pat": True,             # convolve output patterns
    "d_c_size": 40                # conv decoder size
}

# Options forwarded to train.train below.
train_params = {
    "num_updates": 300000,        # total gradient updates per model
    "save_progress": True,
    "verbose": True,
    "save_parameters": False,     # intermediate saves off; final save is explicit
}


def create_parser():
    """Build the command-line parser for this script.

    Returns:
        argparse.ArgumentParser exposing a single ``--dataset-input``
        option: the directory that contains one subdirectory per protein
        dataset listed in ``proteus_names``.
    """
    # The previous description was garbled copy-paste text from an unrelated
    # (ESM-1v labeling) script; replaced with an accurate one.
    parser = argparse.ArgumentParser(
        description="Train a DeepSequence VAE per protein dataset and score "
                    "single mutants against deep mutational scan data."
    )

    parser.add_argument(
        "--dataset-input",
        type=str,
        default=None,
        help="input dataset directory"
    )
    return parser


if __name__ == "__main__":

    parser = create_parser()
    args = parser.parse_args()

    # Normalize the dataset directory to end with a trailing slash.
    # NOTE(review): this raises TypeError if --dataset-input is omitted
    # (path is None) — consider making the argument required.
    path = args.dataset_input
    if path[-1] == '/':
        path = path
    else:
        path = path + '/'

    # path='Proteus-Legacy/data/common_proteins/'
    # proteus_names = os.listdir(path)

    # Train one DeepSequence VAE per protein, score all single mutants, and
    # compare the scores against the experimental measurements.
    i = 0
    for proteus in proteus_names:
        i = i + 1
        data_params = {
            "dataset": proteus
        }
        file_prefix = proteus
        # Loads the alignment for this dataset; weights disabled here.
        data_helper = helper.DataHelper(dataset=data_params["dataset"],
                                        calc_weights=False, path=path)

        # Build the VAE with the shared hyperparameters defined above.
        vae_model = model.VariationalAutoencoder(data_helper,
                                                 batch_size=model_params["batch_size"],
                                                 encoder_architecture=[model_params["encode_dim_zero"],
                                                                       model_params["encode_dim_one"]],
                                                 decoder_architecture=[model_params["decode_dim_zero"],
                                                                       model_params["decode_dim_one"]],
                                                 n_latent=model_params["n_latent"],
                                                 n_patterns=model_params["n_patterns"],
                                                 convolve_patterns=model_params["conv_pat"],
                                                 conv_decoder_size=model_params["d_c_size"],
                                                 logit_p=model_params["logit_p"],
                                                 sparsity=model_params["sparsity"],
                                                 encode_nonlinearity_type=model_params["encode_nonlin"],
                                                 decode_nonlinearity_type=model_params["decode_nonlin"],
                                                 final_decode_nonlinearity=model_params["final_decode_nonlin"],
                                                 output_bias=model_params["output_bias"],
                                                 final_pwm_scale=model_params["final_pwm_scale"],
                                                 working_dir=".")

        print("Model %d builded" % i)

        # Job string identifies this run; used as the parameter-file prefix.
        job_string = helper.gen_job_string(data_params, model_params)
        print ("JB:" + job_string)

        # Echo the focus sequence and its coordinates for a sanity check.
        print (data_helper.focus_seq_name)
        print (str(data_helper.focus_start_loc) + "-" + str(data_helper.focus_stop_loc))
        print (data_helper.focus_seq)
        print('')
        print ("".join(data_helper.focus_seq_trimmed))

        train.train(data_helper, vae_model,
                    num_updates=train_params["num_updates"],
                    save_progress=train_params["save_progress"],
                    save_parameters=train_params["save_parameters"],
                    verbose=train_params["verbose"],
                    job_string=job_string)

        vae_model.save_parameters(file_prefix=job_string)
        print("Model %d trained" % i)

        # Score every single mutant: names plus delta-ELBO predictions,
        # averaged over 500 ELBO samples.
        full_matr_mutant_name_list, full_matr_delta_elbos \
            = data_helper.single_mutant_matrix(vae_model, N_pred_iterations=500)

        print (full_matr_mutant_name_list[0], full_matr_delta_elbos[0])

        # Pair each mutant name with its predicted score.
        tmpp = []
        for j in range(len(full_matr_mutant_name_list)):
            tmpp.append([full_matr_mutant_name_list[j], full_matr_delta_elbos[j]])
        # print("Model %d trained" % i)
        df = pd.DataFrame(tmpp, columns=['DP', 'DP_S'])
        # print(df)

        # Experimental ground truth. NOTE(review): the file is a .tsv but is
        # read with read_csv's default comma separator, so each row ends up as
        # a single "mutant\tscore" string; the loop below recovers the two
        # fields from the stringified numpy row.
        df_real = pd.read_csv(path + proteus + '/' + 'experiments/' + proteus + '.tsv')
        # print(df_real)
        dps = np.array(df)
        # print(dps)
        reals = []
        zzz = np.array(df_real)
        for z in zzz:
            # print(z)
            # NOTE(review): splitting on the literal character 't' only works
            # because the escaped tab ("\\t") is reduced to 't' and mutant
            # names/scores contain no other lowercase 't' — very fragile;
            # prefer read_csv(..., sep='\t').
            z = str(z).replace('[', '').replace(']', '').replace('\'', '').replace('\\', '').split('t')
            # print(z)
            reals.append([z[0], float(z[1])])
        # print(reals)
        # Align predictions with experiment by mutant name (O(n*m) join).
        real_s = []
        dp_s = []
        for ii in dps:
            for j in reals:
                if ii[0] == j[0]:
                    real_s.append(j[1])
                    dp_s.append(ii[1])
                    break
        x = dp_s
        y = real_s
        # print(x,y)
        # Report Spearman rho and p-value between predictions and experiment.
        sp_p = calculate_spearman_correlation_p(x, y)
        sp = calculate_spearman_correlation(x, y)
        print(proteus + ':')
        print(sp, sp_p)
        out_path = path + proteus + '/'
        # ffff=open(out_path)

        # Persist per-mutant predictions; assumes predictions/ already exists.
        df.to_csv(out_path + 'predictions/' + proteus + '.deepsequence_M.csv', index=None)