import os
import sys

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.loss import NTXentLoss
from mcr.custom_loss import TotalCodingRate
sys.path.append("..")


def chunk_avg(x, n_chunks=2, normalize=False):
    """Split ``x`` into ``n_chunks`` equal slices along dim 0 and average them.

    Args:
        x: tensor whose leading dimension is divisible into ``n_chunks`` parts.
        n_chunks: number of equal chunks to average over.
        normalize: when True, L2-normalize each row of the averaged result.

    Returns:
        Tensor of shape ``(x.shape[0] // n_chunks, *x.shape[1:])``.
    """
    stacked = torch.stack(x.chunk(n_chunks, dim=0), dim=0)
    averaged = stacked.mean(dim=0)
    if normalize:
        return F.normalize(averaged, dim=1)
    return averaged


class Similarity_Loss(nn.Module):
    """Negative mean cosine similarity between each patch embedding and the
    mean embedding across patches (higher similarity -> lower loss).

    Note: the ``z_avg`` argument is accepted for interface compatibility but
    is IGNORED — the mean is always recomputed from ``z_list`` (this matches
    the original behavior, where the parameter was immediately overwritten).
    """

    def __init__(self):
        super().__init__()

    def forward(self, z_list, z_avg):
        """Compute the similarity loss.

        Args:
            z_list: sequence of per-patch embedding tensors, each (B, D).
            z_avg: unused; kept for backward compatibility with callers.

        Returns:
            (loss, sim): the negated mean similarity (differentiable) and a
            detached copy of the positive similarity for logging.
        """
        num_patch = len(z_list)
        z_stack = torch.stack(list(z_list), dim=0)
        # Recompute the patch mean from z_list (z_avg is intentionally unused).
        z_mean = z_stack.mean(dim=0)

        z_sim = 0
        for i in range(num_patch):
            z_sim += F.cosine_similarity(z_stack[i], z_mean, dim=1).mean()
        z_sim = z_sim / num_patch

        return -z_sim, z_sim.clone().detach()


def cal_TCR(z, criterion, num_patches):
    """Average ``criterion`` over the ``num_patches`` chunks of ``z`` along dim 0.

    Args:
        z: tensor whose leading dimension concatenates the patches.
        criterion: callable applied to each chunk (e.g. a TotalCodingRate loss).
        num_patches: number of equal chunks to split ``z`` into.

    Returns:
        Mean criterion value across the chunks.
    """
    chunks = z.chunk(num_patches, dim=0)
    total = sum(criterion(chunk) for chunk in chunks)
    return total / num_patches


def Trainer(model, temporal_contr_model, model_optimizer, temp_cont_optimizer, train_dl, valid_dl, test_dl, device, logger, config, experiment_log_dir, training_mode):
    """Drive the full training run: per-epoch train/validate, checkpoint the
    final weights, then (outside self-supervised mode) evaluate on the test set."""
    logger.debug("Training started ....")

    criterion = nn.CrossEntropyLoss()
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optimizer, 'min')

    for epoch in range(1, config.num_epoch + 1):
        train_loss, train_acc = model_train(model, temporal_contr_model, model_optimizer,
                                            temp_cont_optimizer, criterion, train_dl,
                                            config, device, training_mode)
        valid_loss, valid_acc, _, _ = model_evaluate(model, temporal_contr_model,
                                                     valid_dl, device, training_mode)
        # Validation loss only drives the plateau scheduler outside
        # self-supervised mode (where valid_loss is a dummy 0).
        if training_mode != 'self_supervised':
            scheduler.step(valid_loss)

        logger.debug(f'\nEpoch : {epoch}\n'
                     f'Train Loss     : {train_loss:.4f}\t | \tTrain Accuracy     : {train_acc:2.4f}\n'
                     f'Valid Loss     : {valid_loss:.4f}\t | \tValid Accuracy     : {valid_acc:2.4f}')

    # Always checkpoint both models after the last epoch.
    save_dir = os.path.join(experiment_log_dir, "saved_models")
    os.makedirs(save_dir, exist_ok=True)
    checkpoint = {'model_state_dict': model.state_dict(),
                  'temporal_contr_model_state_dict': temporal_contr_model.state_dict()}
    torch.save(checkpoint, os.path.join(save_dir, 'ckp_last.pt'))

    # No labeled test evaluation is meaningful in self-supervised mode.
    if training_mode != "self_supervised":
        logger.debug('\nEvaluate on the Test set:')
        test_loss, test_acc, _, _ = model_evaluate(model, temporal_contr_model,
                                                   test_dl, device, training_mode)
        logger.debug(f'Test loss      :{test_loss:0.4f}\t | Test Accuracy      : {test_acc:0.4f}')

    logger.debug("\n################## Training is Done! #########################")


def model_train(model, temporal_contr_model, model_optimizer, temp_cont_optimizer, criterion, train_loader, config, device, training_mode):
    """Train both models for one epoch.

    In "self_supervised" mode the loss combines the temporal-contrasting
    losses, an NT-Xent term, a patch-similarity (contractive) term, and a
    total-coding-rate term; accuracy is undefined and 0 is returned for it.
    In any other mode ``criterion`` (cross-entropy) is applied to the model's
    predictions on the raw data.

    Returns:
        (mean epoch loss, mean epoch accuracy — or 0 in self-supervised mode)
    """
    total_loss = []
    total_acc = []
    model.train()
    temporal_contr_model.train()

    if training_mode == "self_supervised":
        # Hoisted out of the batch loop: these objects depend only on config
        # and device, so re-creating them every batch was pure overhead.
        nt_xent_criterion = NTXentLoss(device, config.batch_size, config.Context_Cont.temperature,
                                       config.Context_Cont.use_cosine_similarity)
        contractive_loss = Similarity_Loss()
        # Renamed from `criterion` so the cross-entropy loss passed in by the
        # caller is no longer shadowed.
        # NOTE(review): eps is taken from config.lr — looks like a config-key
        # reuse; confirm this is the intended TCR epsilon.
        tcr_criterion = TotalCodingRate(eps=config.lr)
        lambda1 = 0.1
        lambda2 = 0.7

    for batch_idx, (data, labels, aug1, aug2) in enumerate(train_loader):
        # send to device
        data, labels = data.float().to(device), labels.long().to(device)
        aug1, aug2 = aug1.float().to(device), aug2.float().to(device)

        model_optimizer.zero_grad()
        temp_cont_optimizer.zero_grad()

        if training_mode == "self_supervised":
            predictions, features = model(data)
            predictions1, features1 = model(aug1)
            predictions2, features2 = model(aug2)

            # normalize projection feature vectors
            features = F.normalize(features, dim=1)
            features1 = F.normalize(features1, dim=1)
            features2 = F.normalize(features2, dim=1)

            # Temporal contrasting between the two augmented views, and
            # between each augmented view and the original view.
            temp_cont_loss1, temp_cont_lstm_feat1 = temporal_contr_model(features1, features2)
            temp_cont_loss2, temp_cont_lstm_feat2 = temporal_contr_model(features2, features1)
            temp_cont_loss3, temp_cont_lstm_feat3 = temporal_contr_model(features1, features)
            temp_cont_loss4, temp_cont_lstm_feat4 = temporal_contr_model(features2, features)

            zis = temp_cont_lstm_feat1
            zjs = temp_cont_lstm_feat2
            zjk = (temp_cont_lstm_feat3 + temp_cont_lstm_feat4) / 2

            loss1 = (temp_cont_loss1 + temp_cont_loss2) * lambda1 + nt_xent_criterion(zis, zjs) * lambda2

            # MCR-style terms over the averaged original-view context features.
            z_proj = zjk
            z_list = z_proj.chunk(config.num_patches, dim=0)
            # NOTE(review): Similarity_Loss ignores z_avg and recomputes the
            # mean internally; kept for interface compatibility.
            z_avg = chunk_avg(z_proj, config.num_patches)
            loss_contract, _ = contractive_loss(z_list, z_avg)
            loss_TCR = cal_TCR(z_proj, tcr_criterion, config.num_patches)

            loss = loss1 + nt_xent_criterion(temp_cont_lstm_feat3, temp_cont_lstm_feat4) \
                   + 200 * loss_contract + 1 * loss_TCR

        else:
            # supervised training or fine-tuning
            output = model(data.detach())
            predictions, features = output
            loss = criterion(predictions, labels)
            total_acc.append(labels.eq(predictions.detach().argmax(dim=1)).float().mean())

        total_loss.append(loss.item())
        loss.backward()
        model_optimizer.step()
        temp_cont_optimizer.step()

    total_loss = torch.tensor(total_loss).mean()

    if training_mode == "self_supervised":
        total_acc = 0  # accuracy is not defined without labels
    else:
        total_acc = torch.tensor(total_acc).mean()
    return total_loss, total_acc


def model_evaluate(model, temporal_contr_model, test_dl, device, training_mode):
    """Evaluate ``model`` on ``test_dl`` with cross-entropy.

    In "self_supervised" mode there is nothing to score, so dummy values
    ``(0, 0, [], [])`` are returned immediately — the original code still
    walked the entire dataloader (moving every batch to device) before
    discarding it, and re-tested the mode three times per batch.

    Returns:
        (mean loss, mean accuracy, predicted labels, target labels)
        as (tensor, tensor, np.ndarray, np.ndarray) in supervised modes.
    """
    model.eval()
    temporal_contr_model.eval()

    if training_mode == "self_supervised":
        return 0, 0, [], []

    total_loss = []
    total_acc = []

    criterion = nn.CrossEntropyLoss()
    outs = np.array([])
    trgs = np.array([])

    with torch.no_grad():
        for data, labels, _, _ in test_dl:
            data, labels = data.float().to(device), labels.long().to(device)

            predictions, features = model(data)
            loss = criterion(predictions, labels)
            total_acc.append(labels.eq(predictions.detach().argmax(dim=1)).float().mean())
            total_loss.append(loss.item())

            pred = predictions.max(1, keepdim=True)[1]  # index of the max log-probability
            outs = np.append(outs, pred.cpu().numpy())
            trgs = np.append(trgs, labels.data.cpu().numpy())

    total_loss = torch.tensor(total_loss).mean()  # average loss
    total_acc = torch.tensor(total_acc).mean()  # average acc
    return total_loss, total_acc, outs, trgs


def model_encoding(model, temporal_contr_model, test_dl, config, device, training_mode):
    """Run the (frozen) models over ``test_dl`` and collect encoded features.

    In "self_supervised" mode: feeds the raw data and both augmentations
    through the encoder and the temporal-contrasting model, accumulates a
    diagnostic loss, and collects three flattened feature arrays.
    Returns (per-batch loss list, mean loss, encoder features,
    weak-view context features, strong-view context features).

    Otherwise: supervised forward pass with cross-entropy.
    Returns (mean loss, mean accuracy, predictions, encoder features, targets).
    """

    model.eval()
    temporal_contr_model.eval()

    total_loss = []
    total_acc = []

    criterion = nn.CrossEntropyLoss()
    outs = np.array([])          # predicted class indices (supervised path)
    trgs = np.array([])          # ground-truth labels (supervised path)
    outputs_weak_features = np.array([])    # context features of the weak view
    outputs_strong_features = np.array([])  # context features of the strong view
    outs_features = np.array([])            # raw encoder features

    with torch.no_grad():
        for data, labels, aug1, aug2 in test_dl:
            data, labels = data.float().to(device), labels.long().to(device)
            aug1, aug2 = aug1.float().to(device), aug2.float().to(device)

            if training_mode == "self_supervised":
                # Obtain the representations under the self-supervised scheme
                predictions, features = model(data)
                predictions1, features1 = model(aug1)
                predictions2, features2 = model(aug2)

                # normalize projection feature vectors
                features = F.normalize(features, dim=1)
                features1 = F.normalize(features1, dim=1)
                features2 = F.normalize(features2, dim=1)

                # Cross-view temporal contrasting between the two augmentations.
                temp_cont_loss1, temp_cont_lstm_feat1 = temporal_contr_model(features1, features2)
                temp_cont_loss2, temp_cont_lstm_feat2 = temporal_contr_model(features2, features1)

                # normalize projection feature vectors
                zis = temp_cont_lstm_feat1
                zjs = temp_cont_lstm_feat2

                # Loss weights for the diagnostic loss below.
                lambda1 = 0.1
                lambda2 = 0.7
                lambda3 = 0.2

                # NOTE(review): nt_xent_criterion is constructed but unused on
                # this path (its loss line is commented out below).
                nt_xent_criterion = NTXentLoss(device, config.batch_size, config.Context_Cont.temperature,
                                               config.Context_Cont.use_cosine_similarity)
                # loss = (temp_cont_loss1 + temp_cont_loss2) * lambda1 + nt_xent_criterion(zis, zjs) * lambda2

                # Modification: MCR-style similarity / coding-rate terms.
                # NOTE(review): `criterion` here shadows the CrossEntropyLoss
                # defined above; harmless because the branches are exclusive,
                # but worth renaming. eps=config.lr also looks like a
                # config-key reuse — confirm intended.
                contractive_loss = Similarity_Loss()
                criterion = TotalCodingRate(eps=config.lr)

                z_proj = zis
                z_list = z_proj.chunk(config.num_patches, dim=0)
                z_avg = chunk_avg(z_proj, config.num_patches)
                # Contractive Loss over the weak-view context features
                loss_contract, _ = contractive_loss(z_list, z_avg)
                loss_TCR = cal_TCR(z_proj, criterion, config.num_patches)

                z_proj1 = zjs
                z_list1 = z_proj1.chunk(config.num_patches, dim=0)
                z_avg1 = chunk_avg(z_proj1, config.num_patches)
                # Contractive Loss over the strong-view context features
                loss_contract1, _ = contractive_loss(z_list1, z_avg1)
                loss_TCR1 = cal_TCR(z_proj1, criterion, config.num_patches)

                # NOTE(review): loss_TCR / loss_TCR1 are computed above but do
                # not appear in the loss actually used below (earlier variant
                # kept commented for reference).
                # loss = (temp_cont_loss1 + temp_cont_loss2) * lambda1 \
                #        + 200 * loss_contract + 1 * loss_TCR \
                #        + 200 * loss_contract1 + 1 * loss_TCR1
                loss = (temp_cont_loss1 + temp_cont_loss2) * lambda1 \
                       + lambda2 * loss_contract  \
                       + lambda3 * loss_contract1

                # zip features
                outs_features = np.append(outs_features, features.detach().cpu().numpy())
                outputs_weak_features = np.append(outputs_weak_features, zis.detach().cpu().numpy())
                outputs_strong_features = np.append(outputs_strong_features, zjs.detach().cpu().numpy())

            else:
                output = model(data)
                # compute loss
                predictions, features = output
                outs_features = np.append(outs_features, features.detach().cpu().numpy())
                loss = criterion(predictions, labels)
                total_acc.append(labels.eq(predictions.detach().argmax(dim=1)).float().mean())

                # pred value
                pred = predictions.max(1, keepdim=True)[1]  # get the index of the max log-probability
                outs = np.append(outs, pred.cpu().numpy())
                trgs = np.append(trgs, labels.data.cpu().numpy())

            total_loss.append(loss.item())

    if training_mode == "self_supervised":
        # total_acc = 0
        avg_loss = torch.tensor(total_loss).mean()  # average loss
        return total_loss, avg_loss, outs_features, outputs_weak_features, outputs_strong_features
    else:
        total_loss = torch.tensor(total_loss).mean()  # average loss
        total_acc = torch.tensor(total_acc).mean()  # average acc
        return total_loss, total_acc, outs, outs_features, trgs