import json
from torch.optim import AdamW
import pytorch_lightning as pl
from transformers import T5ForConditionalGeneration


class T5(pl.LightningModule):
    """LightningModule wrapping a pretrained T5 for seq2seq training and evaluation.

    Training logs the LM loss; validation/test generate predictions, score them
    with an external evaluator, and log the resulting metrics.
    """

    def __init__(self, args, tokenizer, evaluator):
        """
        Args:
            args: config namespace; must provide ``model_dir``, ``max_length``, ``lr``.
            tokenizer: tokenizer used to decode generated token ids back to text.
            evaluator: object exposing ``evaluate(preds, golds, split) -> dict``.
        """
        super().__init__()

        self.args = args
        self.tokenizer = tokenizer
        self.model = T5ForConditionalGeneration.from_pretrained(args.model_dir)
        self.evaluator = evaluator

    def training_step(self, batch, batch_idx):
        """Run one training step and return the language-modeling loss."""
        loss = self.model(input_ids=batch['input_ids'],
                          attention_mask=batch['attention_mask'],
                          labels=batch['labels']).loss
        self.log('train/loss', loss)
        return {
            'loss': loss
        }

    def validation_step(self, batch, batch_idx):
        """Generate predictions for one batch and pair them with the gold targets."""
        preds = self.model.generate(input_ids=batch['input_ids'],
                                    attention_mask=batch['attention_mask'],
                                    max_length=self.args.max_length)
        preds = self.tokenizer.batch_decode(preds,
                                            skip_special_tokens=True,
                                            clean_up_tokenization_spaces=False)

        return {
            'preds': preds,
            'golds': batch['golds']
        }

    @staticmethod
    def _collect(outputs):
        """Flatten per-batch step outputs into flat (preds, golds) lists."""
        preds, golds = [], []
        for output in outputs:
            preds.extend(output['preds'])
            golds.extend(output['golds'])
        return preds, golds

    def _log_scores(self, preds, golds, split):
        """Score predictions against golds and log each metric as ``{split}/{key}``."""
        scores = self.evaluator.evaluate(preds, golds, split)
        for key, value in scores.items():
            self.log(f'{split}/{key}', value)

    def validation_epoch_end(self, outputs):
        """Aggregate dev predictions, log metrics, and dump predictions to disk."""
        preds, golds = self._collect(outputs)
        self._log_scores(preds, golds, 'dev')

        # Overwritten every validation epoch; keeps only the latest predictions.
        with open('preds.json', 'w') as file:
            json.dump(preds, file)

    def test_step(self, batch, batch_idx):
        """Test-time behaviour is identical to validation."""
        return self.validation_step(batch, batch_idx)

    def test_epoch_end(self, outputs):
        """Aggregate test predictions and log metrics (no file dump for test)."""
        preds, golds = self._collect(outputs)
        self._log_scores(preds, golds, 'test')

    def configure_optimizers(self):
        """AdamW over all model parameters at the configured learning rate."""
        return AdamW(self.model.parameters(), lr=self.args.lr)
