import os
import sys

import sklearn.metrics

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras.optimizers import Adam
import utils
from keras.callbacks import EarlyStopping
from nettcr_architectures import nettcr_ab
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import keras.metrics
import random
import seaborn as sns
from imblearn.over_sampling import SMOTE
from argparse import ArgumentParser
import math

# Pandas display configuration: print DataFrames without truncation.
for _opt in ('display.max_rows', 'display.max_columns', 'display.width'):
    pd.set_option(_opt, None)
# pd.set_option('display.max_colwidth', -1)

# Default hyper-parameters (the command-line arguments below take precedence).
succession = True
lr = [0.0005, 0.001, 0.005]   # candidate learning rates
ld = [1, 0.5]                 # candidate decay rates
ldt = 10                      # decay times
bs = 128                      # batch size
e = 100                       # epochs

# ----------------------------------------- Command-line arguments ------------
parser = ArgumentParser(description="设定输入参数")
parser.add_argument("-n", "--sp_name", default="", help="模型命名")
parser.add_argument("-rm", "--read_model", default=-1, type=int, help="判断是否需要模型")
# Input / output file locations
parser.add_argument("-tr", "--train_file", default="data/model_input/raw_data/train_set.csv", help="设定训练集")
parser.add_argument("-te", "--test_file", default="data/model_input/raw_data/gig_test.csv", help="设定测试集")
# Hyper-parameters
parser.add_argument("-e", "--epochs", default=1, type=int, help="设定训练时期数")
parser.add_argument("-lr", "--learn_rate", default=0.001, type=float, help="设定优化算法学习率")
parser.add_argument("-ld", "--lr_decay_rate", default=-1, type=float, help="学习率衰减")
parser.add_argument("-ldt", "--lr_decay_times", default=-1, type=int, help="学习率衰减次数")
parser.add_argument("-bs", "--batch_size", default=128, type=int, help="批大小")

# Train vs. test-only mode
parser.add_argument("-t", "--if_skip_train", default=-1, type=int, help="如果大于等于0，则读取第参数批的模型进行测试")

args = parser.parse_args()

# argparse already coerced each option via its declared type=, so the values
# can be read back directly without re-casting.
LEARN_RATE = args.learn_rate
LEARN_RATE_DECAY_RATE = args.lr_decay_rate
LEARN_RATE_DECAY_TIMES = args.lr_decay_times
EPOCHS = args.epochs
BATCH_SIZE = args.batch_size
special_name = args.sp_name

# Epoch-indexed options must never exceed the epoch count.
if_skip_train = min(args.if_skip_train, EPOCHS)
read_model = min(args.read_model, EPOCHS)

# ----------------------------------------- Output directory layout -----------
# Encode the run's hyper-parameters in the output directory name so runs with
# different settings never overwrite one another.
output_path = ('data/model_output/' + special_name + 'lr' + str(LEARN_RATE)
               + 'bs' + str(BATCH_SIZE) + 'ep' + str(EPOCHS) + '/')
if LEARN_RATE_DECAY_RATE > 0 and LEARN_RATE_DECAY_TIMES > 0:
    output_path += 'ld' + str(LEARN_RATE_DECAY_RATE) + 'ldt' + str(LEARN_RATE_DECAY_TIMES) + '/'
train_model_path = output_path + 'trained_model/'  # per-epoch saved models
his_path = output_path + 'history/'                # per-epoch histories / predictions
# makedirs(..., exist_ok=True) creates the whole tree in one call and avoids
# the check-then-create race of the original exists()/mkdir() pairs.
for _d in (output_path, train_model_path, his_path):
    os.makedirs(_d, exist_ok=True)

# --------------------------------------------------- Data preparation --------
print('Loading and encoding the data..')
train_data = pd.read_csv(args.train_file)
test_data = pd.read_csv(args.test_file)

# BLOSUM50 encoding over the 20 standard amino acids; sequences are encoded to
# fixed maximum lengths (peptide: 9, CDR3 alpha/beta: 30).
encoding = utils.blosum50_20aa
early_stop = EarlyStopping(monitor='loss', min_delta=0, patience=10,
                           verbose=0, mode='min', restore_best_weights=True)

PEP_MAX_LEN, CDR3_MAX_LEN = 9, 30

pep_train = utils.enc_list_bl_max_len(train_data.peptide, encoding, PEP_MAX_LEN)
tcra_train = utils.enc_list_bl_max_len(train_data.CDR3a, encoding, CDR3_MAX_LEN)
tcrb_train = utils.enc_list_bl_max_len(train_data.CDR3b, encoding, CDR3_MAX_LEN)
y_train = np.array(train_data.binder)

pep_test = utils.enc_list_bl_max_len(test_data.peptide, encoding, PEP_MAX_LEN)
tcra_test = utils.enc_list_bl_max_len(test_data.CDR3a, encoding, CDR3_MAX_LEN)
tcrb_test = utils.enc_list_bl_max_len(test_data.CDR3b, encoding, CDR3_MAX_LEN)
y_test = np.array(test_data.binder)

# Model input order: [alpha chain, beta chain, peptide].
train_inputs = [tcra_train, tcrb_train, pep_train]
test_inputs = [tcra_test, tcrb_test, pep_test]

# Accumulators filled by the training / evaluation sections below.
mdl = []
history = []
pred_y = []
# --------------------------------------------------- Model training ----------------------------------------------------

# Metrics tracked at compile time.  NOTE(review): the order here must match
# the tuple unpacked from mdl.evaluate(...) in the evaluation section below
# (loss, tp, fp, tn, fn, accuracy, precision, recall, auc, prc).  The original
# list was missing the four confusion-matrix counts and carried a second AUC
# mislabelled 'acc', so that 10-value unpacking raised a ValueError.
METRICS = [
    keras.metrics.TruePositives(name='tp'),
    keras.metrics.FalsePositives(name='fp'),
    keras.metrics.TrueNegatives(name='tn'),
    keras.metrics.FalseNegatives(name='fn'),
    keras.metrics.BinaryAccuracy(name='accuracy'),
    keras.metrics.Precision(name='precision'),
    keras.metrics.Recall(name='recall'),
    keras.metrics.AUC(name='auc'),
    keras.metrics.AUC(name='prc', curve='PR'),  # precision-recall curve
]


def eval_mdl(test_mdl):
    """Run *test_mdl* on the module-level test set and return raw predictions.

    NOTE: a same-named function defined later in the file shadows this one;
    this version serves the training loop that runs before that redefinition.
    """
    return test_mdl.predict(test_inputs, verbose=0)


if if_skip_train < 0:
    # ------------------------------------------------------ Training ---------
    mdl = nettcr_ab()
    # Optional exponential learning-rate decay.
    if LEARN_RATE_DECAY_RATE > 0 and LEARN_RATE_DECAY_TIMES > 0:
        lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            LEARN_RATE,
            decay_steps=int(BATCH_SIZE * EPOCHS / LEARN_RATE_DECAY_TIMES),
            decay_rate=LEARN_RATE_DECAY_RATE,
            staircase=False)
        mdl.compile(loss="binary_crossentropy", optimizer=Adam(lr_schedule), metrics=METRICS)
    else:
        mdl.compile(loss="binary_crossentropy", optimizer=Adam(LEARN_RATE), metrics=METRICS)

    print('Training..')
    # Per-batch fit histories collected within the current epoch.
    epoch_his = []

    # Pre-split the training data into per-batch chunks plus a validation split.
    train_batches, y_train_batches, valid_batches = utils.spilt_data(
        train_inputs, y_train, EPOCHS, BATCH_SIZE, 0.1)

    # EPOCHS / BATCH decay schedules could be added to this loop.
    for i in range(EPOCHS):
        # Visit the batches in a fresh random order each epoch.
        j_list = list(range(1, int(len(y_train) / BATCH_SIZE)))
        random.shuffle(j_list)
        for j in j_list:
            # Fit one batch and keep its history dict.
            epoch_his.append(mdl.fit(train_batches[j - 1], y_train_batches[j - 1],
                                     batch_size=BATCH_SIZE, verbose=2,
                                     callbacks=[early_stop],
                                     validation_data=valid_batches).history)
        # Persist this epoch's model ...
        mdl.save(train_model_path + 'trained_ep' + str(i) + '.tf2')
        # ... its training log ...
        ep_his_path = his_path + 'ep' + str(i) + 'his.npy'
        with open(ep_his_path, 'wb') as f:
            np.save(f, epoch_his, allow_pickle=True)
        history.append(epoch_his)
        epoch_his = []
        # ... and its predictions on the test set.
        pred_y = eval_mdl(mdl)
        ep_pred_path = his_path + 'ep' + str(i) + 'pred.npy'
        with open(ep_pred_path, 'wb') as f:
            np.save(f, pred_y, allow_pickle=True)

    # Use a fresh name instead of mutating his_path in place as the original
    # did, so the history directory path stays intact.
    final_his_path = his_path + 'final_his.npy'
    try:
        with open(final_his_path, 'wb') as f:
            np.save(f, history, allow_pickle=True)
    except OSError:
        # The original caught ImportError here, which open/np.save never raise.
        print("错误")

else:
    # ------------------------ Test-only: reload saved per-epoch artefacts ----
    print("跳过训练，读取模型文件测试")
    for i in range(if_skip_train):
        model_name = train_model_path + "trained_ep" + str(i) + '.tf2'
        if read_model > 0:
            try:
                mdl.append(keras.models.load_model(model_name))
            # A missing/corrupt saved model surfaces as OSError; the original's
            # ImportError clause could never trigger for a missing file.
            except OSError:
                print(model_name + "不存在")
        try:
            history.append(np.load(his_path + 'ep' + str(i) + 'his.npy', allow_pickle=True))
            pred_y.append(np.load(his_path + 'ep' + str(i) + 'pred.npy', allow_pickle=True))
        # FileNotFoundError is a subclass of OSError, so one clause covers
        # both of the original's (the ImportError one was unreachable).
        except OSError:
            print(model_name + "不存在")

# -------------------------------------------------- Model testing ------------
def eval_mdl(test_mdl):
    """Run *test_mdl* on the module-level test set and return raw predictions.

    Shadows the earlier definition of the same name.  The original also built
    a DataFrame joining ``test_data`` with the predictions, but that frame was
    never used (its ``to_csv`` consumer was commented out), so the dead work
    is dropped; rebuild it here if CSV export of predictions is needed.
    """
    return test_mdl.predict(test_inputs, verbose=0)

# -------------------------------------------------- Evaluation ---------------
if mdl:
    print('Evaluating..')
    if if_skip_train > 0:
        # Per-epoch score arrays over the reloaded models.
        loss_art = np.zeros((if_skip_train,))
        aucart = np.zeros((if_skip_train,))
        recallart = np.zeros((if_skip_train,))
        precisionart = np.zeros((if_skip_train,))
        for i in range(if_skip_train):
            # The unpacked values are loss first, then the compiled metrics in
            # the order of the METRICS list.
            loss, tp, fp, tn, fn, accuracy, precision, recall, auc, prc = \
                mdl[i].evaluate(test_inputs, y_test, verbose=2)
            loss_art[i] = loss
            aucart[i] = auc
            recallart[i] = recall
            precisionart[i] = precision
            # NOTE(review): pred_y may already hold this epoch's predictions
            # loaded from disk; appending here duplicates entries — confirm
            # the downstream per-epoch indexing expects that.
            pred_y.append(eval_mdl(mdl[i]))
        utils.plot_test_graphs(loss_art, 'loss', output_path)
        utils.plot_test_graphs(aucart, 'auc', output_path)
        utils.plot_test_graphs(precisionart, 'precision', output_path)
        utils.plot_test_graphs(recallart, 'recall', output_path)

# ------------------------------------------ Analysis of test performance -----
# Plot train/validation curves for each tracked metric across epochs.
if if_skip_train > 0:
    # 'accuracy' is the BinaryAccuracy metric's registered name; the original
    # plotted 'acc', which was an AUC metric mislabelled as accuracy.
    draw_metrics = ["loss", "accuracy", "prc", "recall", "auc"]
    for m in draw_metrics:
        train_m = utils.get_metric_from_history(history, m)
        val_m = utils.get_metric_from_history(history, 'val_' + m)
        utils.plot_graphs(train_m, val_m, m, output_path)

    bound = 0.5  # decision threshold on the model output; tune if needed
    test_acc = np.zeros(EPOCHS)
    test_roc_auc = np.zeros(EPOCHS)
    test_precision = np.zeros(EPOCHS)
    test_recall = np.zeros(EPOCHS)
    # Score each epoch's stored predictions against the test labels.
    # NOTE(review): len(pred_y) can exceed EPOCHS if predictions were both
    # loaded from disk and re-appended during evaluation — verify upstream.
    for i in range(len(pred_y)):
        ep_y = pred_y[i]
        test_acc[i], test_recall[i], test_precision[i], test_roc_auc[i] = \
            utils.cal_metric(y_test, ep_y, bound)
    utils.plot_test_graphs(test_acc, "test_acc", output_path)
    utils.plot_test_graphs(test_roc_auc, "test_auc", output_path)
    utils.plot_test_graphs(test_recall, "test_recall", output_path)
    utils.plot_test_graphs(test_precision, "test_precision", output_path)
    print(max(test_roc_auc))
    # NOTE(review): [-1:1:-10] samples every 10th nonzero AUC from the end
    # (excluding the first two); if the intent was "mean of the last 10
    # epochs", the slice should be [-10:] — confirm before changing.
    print(np.average(test_roc_auc[test_roc_auc.nonzero()][-1:1:-10]))





