import torch
import torch.nn.functional as F
import numpy as np

class EDUSegmenter(object):
    """Wrapper around a neural EDU segmentation model.

    Holds the model, tracks the device its parameters live on, and exposes
    helpers to run a forward pass (caching logits), decode per-word labels,
    measure word-level accuracy, and compute the training loss.
    """

    def __init__(self, seg_model, config):
        self.config = config
        self.seg_model = seg_model

        # Infer the device from the model's parameters. Prefer a trainable
        # parameter (original intent), but fall back to any parameter so a
        # fully-frozen model does not raise StopIteration.
        self.device = torch.device("cpu")
        probe_param = next(
            (p for p in seg_model.parameters() if p.requires_grad),
            next(iter(seg_model.parameters()), None),
        )
        self.use_cuda = probe_param is not None and probe_param.is_cuda
        if self.use_cuda:
            self.device = torch.device("cuda")

    def train(self):
        """Put the underlying model in training mode."""
        self.seg_model.train()

    def eval(self):
        """Put the underlying model in evaluation mode."""
        self.seg_model.eval()

    def forward(self, inputs):
        """Run the model on one batch and cache the result in ``self.logits``.

        Tensor values in ``inputs`` are moved to the segmenter's device;
        note the mapping is mutated in place. Returns None — read
        ``self.logits`` afterwards.
        """
        for key, value in inputs.items():
            if isinstance(value, torch.Tensor):
                inputs[key] = value.to(self.device)

        self.logits = self.seg_model(inputs)
        return

    def labeling(self, onebatch, vocab):
        """Decode cached logits into per-word label strings for each instance.

        For every instance, keeps only the prediction at the first sub-token
        of each word and maps the resulting label ids back to strings via
        ``vocab.id2label``. Returns a list (one entry per instance) of label
        lists.
        """
        # Argmax over the label dimension; detach before leaving the device.
        predict_label_ids = np.argmax(self.logits.detach().cpu().numpy(), axis=-1)

        batch_labels = []
        for idx, instance in enumerate(onebatch):
            # Sub-token -> word-index mapping from the tokenizer.
            # NOTE(review): HF-style word_ids() may contain None for special
            # tokens; the first-subtoken filter below assumes its output
            # lines up with instance.labels — confirm upstream.
            word_ids = instance.tokenized_inputs.word_ids()

            label_ids = []
            pre_word_index = -1
            for idy, word_index in enumerate(word_ids):
                # Keep only the first sub-token of each word.
                if word_index != pre_word_index:
                    label_ids.append(predict_label_ids[idx, idy])
                pre_word_index = word_index

            assert len(label_ids) == len(instance.labels)

            labels = vocab.id2label(label_ids)
            batch_labels.append(labels)

        return batch_labels

    def compute_acc(self, onebatch, vocab):
        """Return (total, correct) word-level label counts against gold labels."""
        batch_labels = self.labeling(onebatch, vocab)

        total, correct = 0, 0
        for idx, labels in enumerate(batch_labels):
            gold = onebatch[idx].labels
            assert len(gold) == len(labels)

            for gold_label, pred_label in zip(gold, labels):
                if gold_label == pred_label:
                    correct += 1
                total += 1

        return total, correct

    def compute_loss(self, gold_labels):
        """Mean cross-entropy over all token positions, ignoring -100 targets.

        ``gold_labels`` is a (batch, seq) tensor of label ids; positions
        marked -100 (padding / masked sub-tokens) are excluded from the loss.
        """
        gold_labels = gold_labels.to(self.device)
        b, t, _ = self.logits.size()

        # reshape (not view): logits may be non-contiguous after upstream ops.
        loss = F.cross_entropy(
            input=self.logits.reshape(b * t, -1),
            target=gold_labels.reshape(-1),
            ignore_index=-100)

        return loss