import json
import logging
import os
import sys
from collections import Counter

import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics.pairwise import cosine_similarity
from torch.utils.data import DataLoader
from tqdm import tqdm

from datasets.loop_marker_dataset import collate_fn, collate_fn_joint, RationaleTestDataset, RationaleTrainDataset
from utils.static_object import Fault_obj

# Module-level logger; INFO and above are emitted to stdout.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, stream=sys.stdout)


def make_preds_epoch(classifier: nn.Module,
                     val_dataset,
                     batch_size: int,
                     device=None,
                     criterion: nn.Module = None,
                     task_name: str = None,
                     temperature: float = 0.3
                     ):
    """Run one evaluation pass over ``val_dataset`` and collect predictions.

    Args:
        classifier: model returning ``(logits, pooled_outputs)``; when
            ``task_name`` is given it is forwarded as a second argument.
        val_dataset: dataset consumed through ``collate_fn``; each batch is
            assumed to be ``(input_ids, attention_mask, labels)`` — TODO
            confirm against ``collate_fn``.
        batch_size: evaluation batch size.
        device: torch device the batch tensors are moved to.
        criterion: loss for the cross-entropy term (must not be None).
        task_name: optional task selector forwarded to the classifier.
        temperature: temperature of the contrastive-loss term (previously a
            hard-coded 0.3 flagged with a fixme).

    Returns:
        ``((ce_loss, con_loss), soft_preds, hard_preds, truths)`` — both loss
        terms averaged over batches, ``soft_preds`` the raw logits,
        ``hard_preds``/``truths`` plain Python ints.
    """
    epoch_ce_loss = 0
    epoch_con_loss = 0
    epoch_soft_pred = []  # raw logits per example
    epoch_hard_pred = []  # argmax(logits) per example
    epoch_truth = []

    # shuffle=False: evaluation order should be deterministic; shuffling
    # validation data does not change the aggregate metrics anyway.
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)
    for batch_inputs in tqdm(val_dataloader):
        batch_inputs = tuple(t.to(device) for t in batch_inputs)
        inputs = (batch_inputs[0], batch_inputs[1])
        labels = batch_inputs[2]
        if task_name is not None:
            logits, pooled_outputs = classifier(inputs, task_name)
        else:
            logits, pooled_outputs = classifier(inputs)
        loss = criterion(logits, labels)
        hard_preds = torch.argmax(logits.float(), dim=-1)

        # Contrastive loss over the pooled embeddings, averaged within the batch.
        con_loss = contrastive_loss(temperature, pooled_outputs.cpu().detach().numpy(), labels)
        con_loss /= len(logits)
        epoch_con_loss += con_loss

        epoch_ce_loss += loss.sum().item()
        epoch_soft_pred.extend(logits)
        # .cpu() so the collected predictions do not pin GPU memory.
        epoch_hard_pred.extend(hard_preds.cpu())
        epoch_truth.extend(labels)

    # Average both loss terms over the number of batches.
    epoch_ce_loss /= len(val_dataloader)
    epoch_con_loss /= len(val_dataloader)
    # .item(): convert 0-d tensors to plain ints for downstream metric code.
    epoch_hard_pred = [x.item() for x in epoch_hard_pred]
    epoch_truth = [x.item() for x in epoch_truth]
    epoch_loss = (epoch_ce_loss, epoch_con_loss)
    return epoch_loss, epoch_soft_pred, epoch_hard_pred, epoch_truth


def make_preds_epoch_ide2cls(classifier: nn.Module,
                             val_dataset: RationaleTestDataset,
                             batch_size: int,
                             device=None,
                             criterion: nn.Module = None,
                             ):
    """Evaluate an identification classifier on ``val_dataset``.

    Args:
        classifier: model returning ``(logits, _)`` for a
            ``(input_ids, attention_mask)`` tuple.
        val_dataset: rationale test set consumed through ``collate_fn_joint``;
            only the first three batch fields (ids, mask, labels) are used.
        batch_size: evaluation batch size.
        device: torch device the batch tensors are moved to.
        criterion: loss function (must not be None).

    Returns:
        ``(loss, soft_preds, hard_preds, truths)``; ``loss`` is the summed
        batch loss divided by the dataset size, ``soft_preds`` the raw
        logits, ``hard_preds``/``truths`` plain Python ints.
    """
    epoch_loss = 0
    epoch_soft_pred = []  # raw logits per example
    epoch_hard_pred = []  # argmax(logits) per example
    epoch_truth = []

    # shuffle=False: deterministic evaluation order; shuffling adds nothing.
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn_joint)
    for ide_batch_inputs in tqdm(val_dataloader):
        ide_inputs = (ide_batch_inputs[0].to(device), ide_batch_inputs[1].to(device))
        ide_labels = ide_batch_inputs[2].to(device)

        logits, _ = classifier(ide_inputs)
        loss = criterion(logits, ide_labels)
        hard_preds = torch.argmax(logits.float(), dim=-1)
        epoch_loss += loss.sum().item()
        epoch_soft_pred.extend(logits)
        # .cpu() so the collected predictions do not pin GPU memory.
        epoch_hard_pred.extend(hard_preds.cpu())
        epoch_truth.extend(ide_labels)

    # Normalize by dataset size: per-example average of the summed loss.
    epoch_loss /= len(val_dataset)
    # .item(): convert 0-d tensors to plain ints for downstream metric code.
    epoch_hard_pred = [x.item() for x in epoch_hard_pred]
    epoch_truth = [x.item() for x in epoch_truth]
    return epoch_loss, epoch_soft_pred, epoch_hard_pred, epoch_truth
    # test_doc_ids, test_queries, test_rationales, test_cls_labels


def make_preds_epoch_joint_dataset(classifier: nn.Module,
                                   val_dataset,
                                   batch_size: int,
                                   device=None,
                                   criterion: nn.Module = None,
                                   task_name: str = None,
                                   temperature: float = 0.3
                                   ):
    """Evaluate on a joint dataset and collect true-positive rationales.

    Besides the usual loss/prediction bookkeeping, every example whose
    prediction and gold label are both 1 (a correctly identified rationale
    sentence) has its doc id / query / sentence / classification label
    collected for the downstream classification stage.

    Args:
        classifier: model returning ``(logits, pooled_outputs)``;
            ``task_name`` is forwarded as a second argument when given.
        val_dataset: dataset consumed through ``collate_fn_joint``; batches
            are ``(input_ids, attention_mask, labels, queries, cls_labels,
            doc_ids, sentences)``.
        batch_size: evaluation batch size.
        device: torch device the input tensors are moved to.
        criterion: loss for the cross-entropy term (must not be None).
        task_name: optional task selector forwarded to the classifier.
        temperature: temperature of the contrastive-loss term (previously a
            hard-coded 0.3 flagged with a fixme).

    Returns:
        ``((ce_loss, con_loss), soft_preds, hard_preds, truths,
        test_doc_ids, test_queries, test_rationales, test_cls_labels)``.
    """
    epoch_ce_loss = 0
    epoch_con_loss = 0
    epoch_soft_pred = []  # raw logits per example
    epoch_hard_pred = []  # argmax(logits) per example
    epoch_truth = []
    test_doc_ids, test_queries, test_rationales, test_cls_labels = [], [], [], []

    # shuffle=False: deterministic evaluation order; metrics are unaffected.
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn_joint)
    for ide_batch_inputs in tqdm(val_dataloader):
        ide_inputs = (ide_batch_inputs[0].to(device), ide_batch_inputs[1].to(device))
        ide_labels = ide_batch_inputs[2].to(device)
        query_epoch = ide_batch_inputs[3]
        cls_label_epoch = ide_batch_inputs[4]  # e.g. SUPPORTS
        doc_id_epoch = ide_batch_inputs[5]
        sentence_epoch = ide_batch_inputs[6]

        if task_name is not None:
            logits, pooled_outputs = classifier(ide_inputs, task_name)
        else:
            logits, pooled_outputs = classifier(ide_inputs)
        loss = criterion(logits, ide_labels)
        hard_preds = torch.argmax(logits.float(), dim=-1)
        epoch_ce_loss += loss.sum().item()

        # Contrastive loss over the pooled embeddings, averaged within the batch.
        con_loss = contrastive_loss(temperature, pooled_outputs.cpu().detach().numpy(), ide_labels)
        con_loss /= len(logits)
        epoch_con_loss += con_loss

        epoch_soft_pred.extend(logits)
        epoch_hard_pred.extend(hard_preds.cpu())
        epoch_truth.extend(ide_labels)

        for i in range(len(logits)):
            # Label 1 marks a positive example, i.e. a rationale sentence;
            # keep only the true positives for the next pipeline stage.
            if hard_preds[i] == ide_labels[i] == 1:
                test_doc_ids.append(doc_id_epoch[i])
                test_queries.append(query_epoch[i])
                test_rationales.append([sentence_epoch[i]])
                test_cls_labels.append(cls_label_epoch[i])

    # Average both loss terms over the number of batches.
    epoch_ce_loss /= len(val_dataloader)
    epoch_con_loss /= len(val_dataloader)
    # .item(): convert 0-d tensors to plain ints for downstream metric code.
    epoch_hard_pred = [x.item() for x in epoch_hard_pred]
    epoch_truth = [x.item() for x in epoch_truth]
    epoch_loss = (epoch_ce_loss, epoch_con_loss)
    return epoch_loss, epoch_soft_pred, epoch_hard_pred, epoch_truth, \
        test_doc_ids, test_queries, test_rationales, test_cls_labels


def make_preds_epoch_joint_dataset_decode(classifier: nn.Module,
                                          val_dataset,
                                          batch_size: int,
                                          device=None,
                                          criterion: nn.Module = None,
                                          task_name: str = None,
                                          decode_path: str = None
                                          ):
    """Evaluate on a joint dataset and dump misclassified examples to disk.

    Works like ``make_preds_epoch_joint_dataset`` (without the contrastive
    term) and additionally writes two error-analysis files under
    ``decode_path``: ``false_neg.json`` (gold 1, predicted 0) and
    ``false_pos.json`` (gold 0, predicted 1), each a JSON list of
    ``Fault_obj`` dicts.

    Args:
        classifier: model returning ``(logits, _)``; ``task_name`` is
            forwarded as a second argument when given.
        val_dataset: dataset consumed through ``collate_fn_joint``; batches
            are ``(input_ids, attention_mask, labels, queries, cls_labels,
            doc_ids, sentences)``.
        batch_size: evaluation batch size.
        device: torch device the input tensors are moved to.
        criterion: loss function (must not be None).
        task_name: optional task selector forwarded to the classifier.
        decode_path: existing directory the JSON files are written to;
            required despite the None default (None fails in os.path.join).

    Returns:
        ``(loss, soft_preds, hard_preds, truths, test_doc_ids, test_queries,
        test_rationales, test_cls_labels)``; ``loss`` is the summed batch
        loss divided by the dataset size.
    """
    epoch_loss = 0
    epoch_soft_pred = []  # raw logits per example
    epoch_hard_pred = []  # argmax(logits) per example
    epoch_truth = []
    test_doc_ids, test_queries, test_rationales, test_cls_labels = [], [], [], []

    false_pos = []  # predicted 1, gold 0
    false_neg = []  # predicted 0, gold 1
    # shuffle=False: deterministic evaluation order; metrics are unaffected.
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn_joint)
    for ide_batch_inputs in tqdm(val_dataloader):
        ide_inputs = (ide_batch_inputs[0].to(device), ide_batch_inputs[1].to(device))
        ide_labels = ide_batch_inputs[2].to(device)
        query_epoch = ide_batch_inputs[3]
        cls_label_epoch = ide_batch_inputs[4]  # e.g. SUPPORTS
        doc_id_epoch = ide_batch_inputs[5]
        sentence_epoch = ide_batch_inputs[6]

        if task_name is not None:
            logits, _ = classifier(ide_inputs, task_name)
        else:
            logits, _ = classifier(ide_inputs)
        loss = criterion(logits, ide_labels)
        hard_preds = torch.argmax(logits.float(), dim=-1)
        epoch_loss += loss.sum().item()
        epoch_soft_pred.extend(logits)
        epoch_hard_pred.extend(hard_preds.cpu())
        epoch_truth.extend(ide_labels)

        for i in range(len(logits)):
            pred = hard_preds[i].item()
            target = ide_labels[i].item()
            if pred == target == 1:
                # Label 1 marks a positive example, i.e. a rationale sentence.
                test_doc_ids.append(doc_id_epoch[i])
                test_queries.append(query_epoch[i])
                test_rationales.append([sentence_epoch[i]])
                test_cls_labels.append(cls_label_epoch[i])
            elif pred != target:
                record = Fault_obj(
                    query=query_epoch[i],
                    sentence=sentence_epoch[i],
                    docid=doc_id_epoch[i],
                    predict=pred,
                    target=target
                )._asdict()
                # Predicting 0 on a gold 1 is a false negative, else false positive.
                (false_neg if pred == 0 else false_pos).append(record)

    # ensure_ascii=False keeps non-ASCII (e.g. Chinese) sentences readable in
    # the dumped files; write as UTF-8 explicitly to match.
    with open(os.path.join(decode_path, 'false_neg.json'), 'w', encoding='utf-8') as f:
        json.dump(false_neg, f, indent=2, ensure_ascii=False)
    with open(os.path.join(decode_path, 'false_pos.json'), 'w', encoding='utf-8') as f:
        json.dump(false_pos, f, indent=2, ensure_ascii=False)

    print(f'false_pos count: {len(false_pos)}\n'
          f'false_neg count: {len(false_neg)}')
    # Normalize by dataset size: per-example average of the summed loss.
    epoch_loss /= len(val_dataset)
    # .item(): convert 0-d tensors to plain ints for downstream metric code.
    epoch_hard_pred = [x.item() for x in epoch_hard_pred]
    epoch_truth = [x.item() for x in epoch_truth]
    return epoch_loss, epoch_soft_pred, epoch_hard_pred, epoch_truth, \
        test_doc_ids, test_queries, test_rationales, test_cls_labels


def contrastive_loss(temp, embedding, label):
    """Supervised contrastive loss over one batch of pooled embeddings.

    For each anchor ``i`` the positives are the other rows sharing
    ``label[i]``; pairwise cosine similarities are divided by ``temp``,
    exponentiated, and normalized over the anchor's row with the diagonal
    (self-similarity) excluded. Anchors with no positive contribute zero.
    Per-anchor terms are summed, not averaged — callers divide by the batch
    size themselves.

    Args:
        temp: temperature (> 0) applied to the similarities.
        embedding: 2-D array-like of shape ``(batch, dim)``.
        label: 1-D label sequence (tensor/ndarray/list) aligned with the rows.

    Returns:
        The scalar loss as a Python float.
    """
    x = np.asarray(embedding, dtype=np.float64)
    # Cosine similarity via row normalization; zero rows keep norm 1, which
    # matches sklearn cosine_similarity's zero-vector handling.
    norms = np.linalg.norm(x, axis=1, keepdims=True)
    norms[norms == 0.0] = 1.0
    unit = x / norms
    exp_sim = np.exp(unit @ unit.T / temp)

    # Per-anchor denominator: sum over all other rows (diagonal removed).
    row_sum = exp_sim.sum(axis=1) - np.diag(exp_sim)

    labels = list(label.tolist()) if hasattr(label, 'tolist') else list(label)
    # Hoisted out of the anchor loop: the original recomputed
    # label.tolist().count(...) per anchor, adding an extra O(n^2) on top of
    # the pair loop below.
    counts = Counter(labels)

    loss = 0.0
    for i, label_i in enumerate(labels):
        n_i = counts[label_i] - 1  # number of positives for anchor i
        if n_i == 0:
            continue  # no positive pair -> no contribution
        inner_sum = 0.0
        for j, label_j in enumerate(labels):
            if i != j and label_i == label_j:
                inner_sum += np.log(exp_sim[i, j] / row_sum[i])
        loss += inner_sum / (-n_i)
    return loss
