import json
import os

import nltk
import numpy as np
import torch
from sklearn.metrics import classification_report

from tqdm import tqdm
from datasets import load_metric

from constants import DOMAIN_SLOT_TAG, VALUE_SLOT_TAG
from src.utils import shift_tokens_left, extract_triplet, multilabel_to_onehot


class Trainer:
    """Training / evaluation loop for a seq2seq generation model.

    Trains with teacher forcing (gold labels fed as ``decoder_input_ids``),
    evaluates with ``model.generate`` and scores the decoded triplets with a
    multilabel classification report, checkpointing on best micro-F1.
    """

    def __init__(self, args, model, tokenizer, labels_map, train_loader, dev_loader,
                 loss_function, optimizer, scheduler=None, device=None):
        """
        Args:
            args: namespace with at least epochs, output_dir, max_length,
                max_target_length, num_beams, eval_beams,
                prediction_loss_only, predict_with_generate.
            model: seq2seq model exposing forward(**inputs) and generate().
            tokenizer: tokenizer providing pad_token_id and batch_decode.
            labels_map: mapping label-name -> index used for one-hot scoring.
            train_loader / dev_loader: yield (inputs_dict, labels) batches.
            loss_function: criterion over (logits, target_ids).
            optimizer: torch optimizer.
            scheduler: optional LR scheduler, stepped once per batch.
            device: torch device the batches are moved to.
        """
        self.args = args
        self.model = model
        self.tokenizer = tokenizer
        self.labels_map = labels_map
        self.train_loader = train_loader
        self.dev_loader = dev_loader
        self.loss_function = loss_function
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.device = device

    def forward(self, inputs, labels) -> dict:
        """
        Method for the forward pass.
        'training_step', 'validation_step' and 'test_step' should call
        this method in order to compute the output predictions and the loss.

        Returns:
            output_dict: forward output containing the predictions
                ('logits') and the 'loss'.
        """
        outputs = self.model(**inputs, return_dict=True, output_hidden_states=True)
        logits = outputs["logits"]
        # Flatten (batch, seq, vocab) -> (batch*seq, vocab) for the criterion.
        loss = self.loss_function(logits.view(-1, logits.shape[-1]), labels.view(-1))

        output_dict = {'loss': loss, 'logits': logits}

        return output_dict

    def train(self):
        """Run the full training loop, evaluating and checkpointing per epoch."""
        best_val_f1 = 0

        for e in range(self.args.epochs):
            # BUG FIX: eval() switches the model to inference mode; re-enable
            # train mode (dropout etc.) at the start of every epoch, not once
            # before the loop.
            self.model.train()
            pbar = tqdm(self.train_loader, desc='Training')

            for batch, labels in pbar:
                # The seq2seq model does not accept token_type_ids; drop it
                # if present (pop with default avoids a KeyError).
                batch.pop('token_type_ids', None)
                # Teacher forcing: gold labels as decoder input, with the
                # -100 ignore positions replaced by the pad token.
                batch["decoder_input_ids"] = torch.where(labels != -100, labels, self.tokenizer.pad_token_id)
                for k, v in batch.items():
                    batch[k] = v.squeeze().to(self.device)
                labels = labels.squeeze().to(self.device)
                # Target at position t is the token at t+1.
                labels = shift_tokens_left(labels, -100)

                output_dict = self.forward(batch, labels)
                loss = output_dict['loss']
                pbar.set_postfix(loss=loss.item())

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                # BUG FIX: the scheduler was accepted in __init__ but never
                # stepped, so a caller-supplied LR schedule had no effect.
                if self.scheduler is not None:
                    self.scheduler.step()

            valid_report = self.eval()
            # eval() returns None (prediction_loss_only) or {} (no
            # predict_with_generate); neither carries a report to index.
            if not valid_report:
                continue

            micro_f1 = valid_report['micro avg']['f1-score']
            if micro_f1 > best_val_f1:
                print('current best valid f1 is %.5f' % micro_f1)
                best_val_f1 = micro_f1

                model_output_path = os.path.join(self.args.output_dir, f'model_{micro_f1}.bin')
                report_output_path = os.path.join(self.args.output_dir, f'report_{micro_f1}.json')
                torch.save(self.model.state_dict(), model_output_path)
                # BUG FIX: close the report file (was json.dump(open(...))
                # which leaked the handle).
                with open(report_output_path, 'w', encoding='utf-8') as report_file:
                    json.dump(valid_report, report_file, ensure_ascii=False)

    def eval(self):
        """Evaluate on the dev loader.

        Returns:
            None when args.prediction_loss_only (only prints the mean loss),
            {} when args.predict_with_generate is false, otherwise the
            sklearn classification report dict from compute_metrics.
        """
        self.model.eval()

        gen_kwargs = {
            "max_length": self.args.max_target_length
            if self.args.max_target_length is not None else self.args.max_length,
            "early_stopping": False,
            "no_repeat_ngram_size": 0,
            "length_penalty": 0,
            "num_beams": self.args.eval_beams if self.args.eval_beams is not None else self.args.num_beams
        }
        pbar = tqdm(self.dev_loader, desc='Evaluating')

        generated_tokens_lst, labels_lst = [], []
        count, loss_sum = 0, 0

        for data, labels in pbar:
            for k, v in data.items():
                data[k] = v.squeeze().to(self.device)

            generated_tokens = self.model.generate(
                data["input_ids"],
                attention_mask=data["attention_mask"],
                **gen_kwargs
            )

            # In case the batch is shorter than max length, the output
            # should be padded so all batches can be concatenated.
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

            labels = labels.squeeze().to(self.device)
            data["decoder_input_ids"] = torch.where(labels != -100, labels, self.tokenizer.pad_token_id)
            # pop with default: the key may be absent for some tokenizers.
            data.pop("token_type_ids", None)
            labels = shift_tokens_left(labels, -100)
            with torch.no_grad():
                # Compute the validation loss alongside generation.
                forward_output = self.forward(data, labels)
                loss = forward_output['loss']
            loss_sum += loss.item()
            count += 1

            generated_tokens_lst.append(generated_tokens)
            labels_lst.append(labels)

        all_generated_tokens = torch.cat(generated_tokens_lst, dim=0)
        all_labels = torch.cat(labels_lst, dim=0)

        if self.args.prediction_loss_only:
            # max(count, 1) guards against an empty dev loader.
            print('val_loss', loss_sum / max(count, 1))
            return

        if all_labels.shape[-1] < gen_kwargs['max_length']:
            all_labels = self._pad_tensors_to_max_len(all_labels, gen_kwargs["max_length"])

        if self.args.predict_with_generate:
            metrics = self.compute_metrics(all_generated_tokens.detach().cpu(),
                                           all_labels.detach().cpu())
        else:
            metrics = {}

        return metrics

    def _pad_tensors_to_max_len(self, token_ids, max_length):
        """Right-pad a (batch, seq) id tensor with pad_token_id to max_length."""
        pad_token_id = self.tokenizer.pad_token_id

        padded_tensor = torch.full(
            (token_ids.shape[0], max_length), pad_token_id,
            dtype=token_ids.dtype, device=token_ids.device
        )

        padded_tensor[:, : token_ids.shape[-1]] = token_ids
        return padded_tensor

    def compute_metrics(self, predictions, labels):
        """Decode generated ids and score extracted triplets.

        Decodes both streams, extracts (domain, slot, value) triplets, maps
        them to multilabel one-hot vectors over labels_map, and returns the
        sklearn classification report as a dict.
        """
        predictions = self.tokenizer.batch_decode(predictions)
        labels = self.tokenizer.batch_decode(labels)

        predictions = extract_triplet(predictions, self.tokenizer)
        labels = extract_triplet(labels, self.tokenizer)

        predictions_index = [multilabel_to_onehot(pred, self.labels_map) for pred in predictions]
        labels_index = [multilabel_to_onehot(label, self.labels_map) for label in labels]

        # target_names as an explicit list for sklearn compatibility;
        # zero_division=1 avoids warnings for labels never predicted.
        report = classification_report(labels_index, predictions_index,
                                       target_names=list(self.labels_map.keys()),
                                       zero_division=1, output_dict=True)
        return report
