import argparse
import importlib
import os
import random
from datetime import datetime

import numpy as np
import torch
from torchinfo import summary

from dataloader.dataloader import data_generator
from dataloader.dataloader2 import data_generator2
from models.TC import TC, TC2
# from models.model import base_Model
# from models.bigru_self_attention_model import bigru_self_attention_model, base_Model
from models.faciesclassificationmodel import Bigru_self_attention_model, base_cnn_Model, base_res_cnn_Model, EfficientResNet18CapsNet
# from trainer.trainer_triplet_loss import Trainer, model_evaluate
# from trainer.trainer import Trainer, model_evaluate
# from trainer.trainer_sim import Trainer, model_evaluate
# from trainer.trainer_sim_plus import Trainer, model_evaluate
from utils import _logger, set_requires_grad
from utils import _calc_metrics, copy_Files
# Args selections
start_time = datetime.now()

parser = argparse.ArgumentParser()

######################## Model parameters ########################
home_dir = os.getcwd()

# Command-line options as (flag, default, type, help) rows, registered in
# declaration order so `--help` output matches the original layout.
# Change the dataset in use via --selected_dataset.
_CLI_ARGS = [
    ('--experiment_description', 'ExpFACIE', str, 'Experiment Description'),
    ('--run_description', 'run_TS-TCC-Plus_sim', str, 'Experiment Description'),
    ('--arch', "TS-TCC-Plus", str, 'TS-TCC or TS-TCC-Plus'),
    ('--encoder_backbone', "BaseCNN", str, 'BaseCNN or BIGRU or EResNet-18CapsuleNet'),
    ('--loss_strategy', "sim2", str, 'sim1, sim2, sim3, or triplet'),
    ('--seed', 0, int, 'seed value'),
    ('--training_mode', 'fine_tune', str, 'Modes of choice: random_init, supervised, self_supervised, fine_tune, train_linear'),
    ('--selected_dataset', 'FACIES', str, 'Dataset of choice: sleepEDF, HAR, Epilepsy, pFD, FACIES'),
    ('--logs_save_dir', 'experiments_logs', str, 'saving directory'),
    ('--device', 'cuda', str, 'cpu or cuda'),
    ('--home_path', home_dir, str, 'Project home directory'),
]
for _flag, _default, _type, _help in _CLI_ARGS:
    parser.add_argument(_flag, default=_default, type=_type, help=_help)
args = parser.parse_args()


loss_method = args.loss_strategy

device = torch.device(args.device)
encoder_backbone = args.encoder_backbone
experiment_description = args.experiment_description + "_" + encoder_backbone

data_type = args.selected_dataset
model_arch = args.arch
method = args.encoder_backbone  # BaseCNN BIGRU
training_mode = args.training_mode
run_description = args.run_description

logs_save_dir = args.logs_save_dir
os.makedirs(logs_save_dir, exist_ok=True)

print("data_type:", data_type)
exec(f'from config_files.{data_type}_Configs import Config as Configs')


# ##### fix random seeds for reproducibility ########
SEED = args.seed
random.seed(SEED)                 # Python's builtin RNG (used by some helpers/augmentations)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)  # previously missing: CUDA RNGs were left unseeded
# NOTE(review): with deterministic=False, cuDNN may still select
# non-deterministic kernels, so GPU runs are not fully reproducible even with
# the seeds above — set True (at a speed cost) to enforce it. Left as-is to
# preserve existing behaviour.
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
#####################################################

# Per-run log directory: <logs_save_dir>/<experiment>/<run>/<mode>_seed_<seed>
experiment_log_dir = os.path.join(logs_save_dir, experiment_description, run_description, training_mode + f"_seed_{SEED}")
os.makedirs(experiment_log_dir, exist_ok=True)

# (removed dead locals `counter` / `src_counter` — never read anywhere in this script)

# Logging: timestamped file logger for this run.
log_file_name = os.path.join(experiment_log_dir, f"logs_{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}.log")
logger = _logger(log_file_name)
logger.debug("=" * 45)
logger.debug(f'Dataset: {data_type}')
logger.debug(f'encoder_backbone:  {encoder_backbone}')
logger.debug(f'Mode:    {training_mode}')
logger.debug("=" * 45)

# Load datasets
data_path = f"./data/{data_type}"

# Select the config class matching the encoder backbone. Previously the
# BaseModel config was imported AND instantiated unconditionally and then
# discarded for non-BaseCNN backbones (shadowing `Config` and wasting work);
# each branch now loads only its own config.
if encoder_backbone == "BaseCNN":
    from config_files.FACIES_BaseModel_Configs import Config
elif encoder_backbone == "BIGRU":
    from config_files.FACIES_Configs import Config
elif encoder_backbone == "EResNet-18CapsuleNet":
    from config_files.FACIES_CapsuleNet_Configs import Config
else:
    # Unknown backbone: original fallback (FACIES_Configs + data_generator2).
    from config_files.FACIES_Configs import Config
configs = Config()

_known_backbones = ("BaseCNN", "BIGRU", "EResNet-18CapsuleNet")
_loader = data_generator if encoder_backbone in _known_backbones else data_generator2
train_dl, valid_dl, test_dl = _loader(data_path, configs, training_mode)

logger.debug("Data loaded ...")


# Select the Trainer / model_evaluate implementation from --arch and
# --loss_strategy.
if model_arch == "TS-TCC":
    from trainer.trainer import Trainer, model_evaluate
elif model_arch == "TS-TCC-Plus":
    if loss_method == "triplet":
        from trainer.trainer_triplet_loss import Trainer, model_evaluate
    elif loss_method == "sim3":
        from trainer.trainer_sim_plus import Trainer, model_evaluate
    else:
        # Covers "sim2" and any other value (original default: trainer_sim).
        from trainer.trainer_sim import Trainer, model_evaluate
else:
    # Previously an unrecognised arch left `Trainer` undefined, failing much
    # later with a confusing NameError; fail fast with a clear message.
    raise ValueError(f"Unknown --arch value: {model_arch!r} (expected 'TS-TCC' or 'TS-TCC-Plus')")


# for HAR dataset
# model = base_Model(configs).to(device)

model = base_cnn_Model(configs).to(device)
temporal_contr_model = TC(configs, device).to(device)
# Load myModel
if method == 'BaseCNN':  # BaseCNN BIGRU
    model = base_cnn_Model(configs).to(device)
    temporal_contr_model = TC(configs, device).to(device)
    # model = base_res_cnn_Model(configs).to(device)
    summary(model,input_size=(configs.batch_size, configs.input_channels, configs.data_seq_len))  # , input_size=(32, 9, 8))
elif method == 'BIGRU':  # BaseCNN BIGRU
    model = Bigru_self_attention_model(configs).to(device)
    temporal_contr_model = TC2(configs, device).to(device)
    # print(summary(model, input_size=(128, 9, 128)))  # , input_size=(32, 9, 8))
    print((configs.batch_size, configs.seq_len, configs.input_channels))
    # summary(model, input_size=(configs.batch_size, configs.input_channels, configs.data_seq_len))
    summary(model, input_size=(configs.batch_size, configs.seq_len, configs.input_channels))  # , input_size=(32, 20, 6))
elif method == 'EResNet-18CapsuleNet':  # BaseCNN |  BIGRU | EResNet-18CapsuleNet
    model = EfficientResNet18CapsNet(configs).to(device)
    temporal_contr_model = TC(configs, device).to(device)
    summary(model, input_size=(configs.batch_size, configs.seq_len, configs.input_channels))  # , input_size=(32, 20, 6))



if training_mode == "fine_tune":
    # Warm-start from the self-supervised checkpoint of the same experiment,
    # dropping the final classification ('logits') parameters so the new head
    # keeps its fresh initialisation.
    load_from = os.path.join(logs_save_dir, experiment_description, run_description, f"self_supervised_seed_{SEED}", "saved_models")
    chkpoint = torch.load(os.path.join(load_from, "ckp_last.pt"), map_location=device)
    pretrained_dict = {
        name: weight
        for name, weight in chkpoint["model_state_dict"].items()
        if 'logits' not in name
    }
    model_dict = model.state_dict()
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)

if training_mode == "train_linear" or "tl" in training_mode:
    # Linear probe / transfer-learning: load the self-supervised weights,
    # drop the 'logits' head, and freeze everything that was loaded so only
    # the last layer trains.
    load_from = os.path.join(logs_save_dir, experiment_description, run_description, f"self_supervised_seed_{SEED}", "saved_models")
    chkpoint = torch.load(os.path.join(load_from, "ckp_last.pt"), map_location=device)
    model_dict = model.state_dict()

    # Keep only keys the current model knows about, minus the linear head.
    pretrained_dict = {
        name: weight
        for name, weight in chkpoint["model_state_dict"].items()
        if name in model_dict and 'logits' not in name
    }

    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    set_requires_grad(model, pretrained_dict, requires_grad=False)  # Freeze everything except last layer.

if training_mode == "random_init":
    # Keep the randomly initialised features frozen and train only the
    # 'logits' head: freeze every parameter whose name lacks 'logits'.
    frozen_params = {
        name: weight
        for name, weight in model.state_dict().items()
        if 'logits' not in name
    }
    set_requires_grad(model, frozen_params, requires_grad=False)  # Freeze everything except last layer.


model_optimizer = torch.optim.Adam(model.parameters(), lr=configs.lr, betas=(configs.beta1, configs.beta2), weight_decay=3e-4)
temporal_contr_optimizer = torch.optim.Adam(temporal_contr_model.parameters(), lr=configs.lr, betas=(configs.beta1, configs.beta2), weight_decay=3e-4)

if training_mode == "self_supervised":  # to do it only once
    copy_Files(os.path.join(logs_save_dir, experiment_description, run_description), data_type)

print("train_dl.batch_size:", train_dl.batch_size)
print("type(train_dl):", type(train_dl))

# Trainer
Trainer(model, temporal_contr_model, model_optimizer, temporal_contr_optimizer, train_dl, valid_dl, test_dl, device, logger, configs, experiment_log_dir, training_mode)

if training_mode != "self_supervised":
    # Testing
    outs = model_evaluate(model, temporal_contr_model, test_dl, device, training_mode)
    total_loss, total_acc, pred_labels, true_labels = outs
    _calc_metrics(pred_labels, true_labels, experiment_log_dir, args.home_path)

logger.debug(f"Training time is : {datetime.now()-start_time}")
