import torch
import torch.nn as nn
import numpy as np
import os
import logging
import pytorch_lightning as pl
from transformers import get_linear_schedule_with_warmup
from xnlp.models import T5Copy, get_optimizer, T5ForConditionalGeneration
from xnlp.data import EncoderDecoderData, get_tokenizer
from bert4torch.evaluate import compute_rouge
from transformers.models.t5 import T5Config
from bert4torch.loss import lm_loss

from xnlp.loss import CopyNll, LMCE
from bert4torch.utils import round4, prepare
from xnlp.config import parser
import warnings

warnings.filterwarnings('ignore')


class TaskLightModel(pl.LightningModule):
    """Lightning wrapper around a T5 conditional-generation model.

    Training minimizes an LM cross-entropy loss (``LMCE``) over the decoder
    targets; validation decodes with ``generate`` (num_beams=1) and scores
    the decoded predictions against references via ``compute_rouge``.
    """

    # Historical hard-coded config location; kept as a fallback so existing
    # callers that do not provide ``args.config_path`` keep working.
    DEFAULT_CONFIG_PATH = '/home/xianglingyang/pretrained_models/torch/t5/config.json'

    def __init__(self, args, tokenizer):
        """
        Args:
            args: parsed CLI namespace. Reads ``lr``, ``weight_decay``,
                ``max_target_length``, ``eval_start``, ``max_epochs``,
                ``max_steps``, ``accumulate_grad_batches``, ``warmup_steps``,
                ``warmup_proportion`` and, optionally, ``config_path``.
            tokenizer: tokenizer exposing ``cls_token_id``, ``sep_token_id``
                and ``batch_decode``.
        """
        super().__init__()
        self.args = args
        self.tokenizer = tokenizer
        # Prefer a path supplied via args over the hard-coded default.
        config_path = getattr(args, 'config_path', None) or self.DEFAULT_CONFIG_PATH
        config = T5Config.from_json_file(config_path)
        self.model = T5ForConditionalGeneration(config)
        self.loss_fn = LMCE()

    def forward(self, **inputs):
        """Delegate directly to the underlying HuggingFace model."""
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        """Compute the LM cross-entropy loss for one training batch."""
        logits = self(**batch).logits
        loss = self.loss_fn(logits, batch['decoder_input_ids'], batch['decoder_attention_mask'])
        return loss

    def predict_batch(self, batch):
        """Greedily generate target sequences for one batch.

        Returns:
            list[str]: decoded predictions with inter-token spaces removed
            (the tokenizer's ``batch_decode`` joins tokens with spaces).
        """
        pred = self.model.generate(eos_token_id=self.tokenizer.sep_token_id,
                                   decoder_start_token_id=self.tokenizer.cls_token_id,
                                   num_beams=1,
                                   input_ids=batch['input_ids'], attention_mask=batch['attention_mask'],
                                   use_cache=True,
                                   max_length=self.args.max_target_length,
                                   )
        # Drop the forced decoder-start token before decoding to text.
        pred = pred[:, 1:].cpu().numpy()
        pred = self.tokenizer.batch_decode(pred, skip_special_tokens=True)
        return [s.replace(' ', '') for s in pred]

    def predict_step(self, batch, batch_idx, dataloader_idx=None):
        """Lightning predict hook; reuses the shared generation helper."""
        return self.predict_batch(batch)

    def validation_step(self, batch, batch_idx):
        """Generate predictions for one validation batch and score with ROUGE.

        Before epoch ``args.eval_start``, generation is skipped (it is
        expensive) and a dummy score of 0 is reported instead.
        """
        if self.current_epoch < self.args.eval_start:
            return {'val_rouge': 0}
        pred = self.predict_batch(batch)
        if self.local_rank == 0:
            print(pred)
        # References: decoder inputs with the start token stripped.
        label = batch['decoder_input_ids'][:, 1:].cpu().numpy()
        label = self.tokenizer.batch_decode(label, skip_special_tokens=True)
        label = [s.replace(' ', '') for s in label]
        rouge = compute_rouge(label, pred)

        return {'val_rouge': rouge}

    def validation_epoch_end(self, outputs):
        """Average per-batch ROUGE and log it so checkpoints can monitor it."""
        if self.current_epoch < self.args.eval_start:
            return {'val_rouge': 0}
        rouge = np.mean([x['val_rouge'] for x in outputs])
        self.log('val_rouge', rouge, prog_bar=True)
        return {'val_rouge': rouge}

    def configure_optimizers(self):
        """Build the optimizer plus a per-step linear warmup/decay schedule."""
        optimizer = get_optimizer(self.model, self.args.lr, self.args.weight_decay)
        # Total optimizer steps: derived from max_steps when epochs are
        # unbounded (-1), otherwise from the train-loader length per epoch.
        if self.args.max_epochs == -1:
            t_total = self.args.max_steps // self.args.accumulate_grad_batches
        else:
            t_total = len(self.train_dataloader()) // self.args.accumulate_grad_batches * self.args.max_epochs
        # An explicit warmup step count takes precedence over the proportion.
        if self.args.warmup_steps != -1:
            warmup_steps = self.args.warmup_steps
        else:
            warmup_steps = int(self.args.warmup_proportion * t_total)
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
                                                    num_training_steps=t_total)
        return [optimizer], [{"scheduler": scheduler, "interval": "step"}]


if __name__ == '__main__':

    args = parser.parse_args()
    # Remove sentinel values so the Lightning Trainer never sees them.
    if args.max_steps == -1:
        del args.max_steps
    if args.gpus == 0:
        del args.plugins
    prepare(args)
    model_tag = os.path.split(args.model_path)[-1]
    script_tag = os.path.split(__file__)[-1][:-3]
    base_name = f'{model_tag}-{script_tag}'
    tokenizer = get_tokenizer('jieba')
    data = EncoderDecoderData(args, tokenizer)
    dataloaders = data.get_dataloader()

    # One independent training run per cross-validation fold.
    for fold_idx in range(args.kfold):
        pl.seed_everything(args.seed + fold_idx)
        train_data = dataloaders['train'][fold_idx]
        dev_data = dataloaders['dev'][fold_idx]
        model = TaskLightModel(args, tokenizer)
        checkpoint = pl.callbacks.ModelCheckpoint(
            monitor="val_rouge",
            dirpath=args.save_dir,
            filename=f'{fold_idx}-{base_name}-' + "{epoch:02d}-{val_rouge:.4f}",
            mode="max",
            save_weights_only=True
        )
        trainer = pl.Trainer.from_argparse_args(args, callbacks=[checkpoint], logger=False)
        trainer.fit(model, train_data, dev_data)
        # Free GPU memory before the next fold starts training.
        del model
        del trainer
        torch.cuda.empty_cache()
