import datetime

import torch
import torch.nn as nn
import torch.nn.functional as F


from torch.nn import CrossEntropyLoss
from transformers import BertModel, AutoConfig, AutoModelWithLMHead


class BertEncoder(nn.Module):
    """BERT backbone plus a projection head producing L2-normalized features."""

    def __init__(self, args, tokenizer):
        super().__init__()
        self.args = args
        self.tokenizer = tokenizer
        # Backbone weights are loaded from a checkpoint path supplied by args.
        self.model = BertModel.from_pretrained(args.base_model_path)
        self.dropout = nn.Dropout(0.1)

        hidden_size = self.model.config.hidden_size
        # Two-layer MLP projecting encoder hidden states down to the
        # (presumably contrastive) feature space of size args.feature_dim.
        self.head = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.GELU(),
            nn.Linear(hidden_size, self.args.feature_dim)
        )

    def forward(self, input_ids, attention_mask):
        """Encode tokens and return (dropout(hidden), normalized features).

        Returns:
            hidden:  last_hidden_state after dropout.
            feature: head(hidden) L2-normalized along the last dim.
        """
        # Fix: removed leftover debug timing (datetime.now() + print) that
        # printed on every forward pass and forced a host-side sync.
        hidden = self.model(input_ids, attention_mask=attention_mask)['last_hidden_state']

        feature = self.head(hidden)
        feature = F.normalize(feature, p=2, dim=-1)
        hidden = self.dropout(hidden)
        return (hidden, feature)

    def get_low_dim_feature(self, hidden):
        """Project hidden states through the head and L2-normalize (no dropout)."""
        feature = self.head(hidden)
        return F.normalize(feature, p=2, dim=-1)


class Classifier(nn.Module):
    """Linear softmax classifier with utilities for class-incremental growth."""

    def __init__(self, args, hidden_dim, label_num):
        super().__init__()
        self.args = args
        self.label_num = label_num
        self.classifier = nn.Linear(hidden_dim, label_num, bias=False)
        self.loss_fn = CrossEntropyLoss()

    def forward(self, hidden, labels=None):
        """Return (logits,) — or (loss, logits) when labels are given."""
        logits = self.classifier(hidden)

        output = (logits,)

        if labels is not None:
            loss = self.loss_fn(logits.view(-1, self.label_num), labels.view(-1))
            output = (loss,) + output

        return output

    def weight_copy(self, prev_classifier):
        """Copy a previous classifier's rows into the head of a fresh layer.

        Assumes prev_classifier has label_num - args.per_types output rows.
        Fix: use the layer's actual in_features instead of hard-coded 768,
        and guard the copy with no_grad (consistent with incremental_learning).
        """
        weight = prev_classifier.classifier.weight.data
        in_dim = self.classifier.in_features
        new_cls = nn.Linear(in_dim, self.label_num, bias=False).to(self.args.device)
        with torch.no_grad():
            new_cls.weight.data[:self.label_num - self.args.per_types] = weight
        self.classifier = new_cls

    def incremental_learning(self, seen_rel_num):
        """Grow (or shrink) the output layer to seen_rel_num classes in place.

        Fixes: (1) in_features no longer hard-coded to 768; (2) the copy
        slices BOTH sides by min(...) — the original `weight[:seen_rel_num]`
        raised a shape mismatch whenever the layer grew; (3) label_num is
        updated so forward()'s loss reshape stays consistent.
        """
        weight = self.classifier.weight.data
        in_dim = self.classifier.in_features
        self.classifier = nn.Linear(in_dim, seen_rel_num, bias=False).to(self.args.device)
        keep = min(seen_rel_num, weight.size(0))
        with torch.no_grad():
            self.classifier.weight.data[:keep] = weight[:keep]
        self.label_num = seen_rel_num


class FocalLoss(nn.Module):
    """Focal loss on top of softmax cross-entropy.

    Down-weights well-classified examples via the (1 - p_t)^gamma
    modulation factor, scaled by a constant alpha.
    """

    def __init__(self, alpha=0.25, gamma=2.0, reduction="mean"):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction

    def forward(self, logits, targets):
        """Return the focal loss of class logits against integer targets."""
        per_sample_ce = F.cross_entropy(logits, targets, reduction='none')
        # p_t: model's probability for the true class, recovered from the CE.
        prob_true = torch.exp(-per_sample_ce)
        modulated = self.alpha * (1.0 - prob_true) ** self.gamma * per_sample_ce
        if self.reduction == "sum":
            return modulated.sum()
        if self.reduction == "mean":
            return modulated.mean()
        # Any other value: return the unreduced per-sample losses.
        return modulated
