import os
import json

import datasets
import torch
from tqdm import tqdm
from collections import OrderedDict
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, T5ForConditionalGeneration

import wandb
from utils.scheduler import Scheduler
from utils.dataset import TokenizedDataset, collate_fn, collate_fn_test


class Processor:
    """End-to-end driver for fine-tuning and evaluating a T5 seq2seq model.

    Wires together the tokenizer, dataset-specific dataloaders, optimizer,
    LR scheduler, training loop, generation-based evaluation, and checkpoint
    save/load.  All configuration is read from the ``args`` namespace passed
    to ``__init__``; ``args`` is also expected to carry project callables
    (``base_dataset``, ``constructor``, ``evaluator``, ``test_dataset``).
    """

    def __init__(self, args):
        """Build tokenizer, dataloaders, model, optimizer and scheduler.

        Side effects: mutates ``args`` in place (adds ``num_training_steps``
        and ``num_warmup_steps``) and, when ``args.wandb`` is set, starts a
        wandb run; otherwise dumps the config to stdout.
        """
        self.args = args
        # Counts micro-batches (not optimizer steps); reset again in train().
        self.current_step = 0

        # use_fast=False: slow (Python) tokenizer — presumably required for
        # exact tokenization parity with the project's preprocessing; confirm.
        self.tokenizer = AutoTokenizer.from_pretrained(self.args.model_dir, use_fast=False)
        self.train_dataloader, self.eval_dataloader, self.test_dataloader = self._get_dataloader()

        # One optimizer step per gradient_accumulation_steps micro-batches.
        args.num_training_steps = len(self.train_dataloader) * args.epochs // args.gradient_accumulation_steps
        # Warmup ratio is hard-coded at 40% of total optimizer steps.
        args.num_warmup_steps = int(args.num_training_steps * 0.4)
        if args.wandb:
            wandb.init(project=args.project, name=args.name, entity=args.entity, config=args)
        else:
            print('args:')
            for key, value in vars(args).items():
                print(f'\t{key}: {value}')

        self.model = T5ForConditionalGeneration.from_pretrained(args.model_dir).to(args.device)
        self.optimizer, self.scheduler = self._get_optimizer_scheduler()

    def _get_dataloader(self):
        """Dispatch on ``args.dataset`` to the matching dataloader factory.

        Returns:
            (train_dataloader, eval_dataloader, test_dataloader) tuple.

        Raises:
            NotImplementedError: for dataset names not listed below.
        """
        if self.args.dataset in ['kvret', 'multiwoz']:
            return self._get_dataloader_from_datasets()

        elif self.args.dataset in ['multiwoz_nlg', 'sgd_nlg']:
            return self._get_dataloader_from_torch()

        elif self.args.dataset in ['reddit', 'dstc7']:
            return self._get_only_test_dataloader()

        else:
            raise NotImplementedError()

    def _get_dataloader_from_torch(self):
        """Build train/dev/test dataloaders from ``args.base_dataset``.

        ``args.base_dataset`` is a project-supplied Dataset class taking
        (args, tokenizer, split-name).
        NOTE(review): no shuffle=True on the train DataLoader — training
        iterates the dataset in a fixed order; confirm this is intentional.
        """
        train_dataset = self.args.base_dataset(self.args, self.tokenizer, 'train')
        eval_dataset = self.args.base_dataset(self.args, self.tokenizer, 'dev')
        test_dataset = self.args.base_dataset(self.args, self.tokenizer, 'test')

        train_dataloader = DataLoader(dataset=train_dataset, batch_size=self.args.batch_size, collate_fn=collate_fn)
        eval_dataloader = DataLoader(dataset=eval_dataset, batch_size=self.args.batch_size, collate_fn=collate_fn)
        test_dataloader = DataLoader(dataset=test_dataset, batch_size=self.args.batch_size, collate_fn=collate_fn)

        return train_dataloader, eval_dataloader, test_dataloader

    def _get_dataloader_from_datasets(self):
        """Build dataloaders via HuggingFace ``datasets`` + project constructor.

        Loads the raw dataset with ``datasets.load_dataset``, converts it to
        seq2seq form with ``args.constructor.to_seq2seq`` (cached under
        ``args.cache_dir``), then tokenizes each split.
        NOTE(review): train DataLoader is not shuffled here either — confirm.
        """
        os.makedirs(self.args.cache_dir, exist_ok=True)
        raw_datasets_split = datasets.load_dataset(path=self.args.loader_path, cache_dir=self.args.data_store_path)
        seq2seq_dataset_split = self.args.constructor.to_seq2seq(raw_datasets_split, self.args.cache_dir)

        # Expected to be a (train, eval, test) triple of seq2seq examples.
        seq2seq_train_dataset, seq2seq_eval_dataset, seq2seq_test_dataset = seq2seq_dataset_split

        train_dataset = TokenizedDataset(self.args, self.tokenizer, seq2seq_train_dataset)
        eval_dataset = TokenizedDataset(self.args, self.tokenizer, seq2seq_eval_dataset)
        test_dataset = TokenizedDataset(self.args, self.tokenizer, seq2seq_test_dataset)

        train_dataloader = DataLoader(dataset=train_dataset, batch_size=self.args.batch_size, collate_fn=collate_fn)
        eval_dataloader = DataLoader(dataset=eval_dataset, batch_size=self.args.batch_size, collate_fn=collate_fn)
        test_dataloader = DataLoader(dataset=test_dataset, batch_size=self.args.batch_size, collate_fn=collate_fn)

        return train_dataloader, eval_dataloader, test_dataloader

    def _get_only_test_dataloader(self):
        """Build a test-only dataloader for datasets without train/dev splits.

        Returns the same test dataloader three times so the caller's
        (train, eval, test) unpacking in ``__init__`` still works.

        Raises:
            NotImplementedError: if ``args.train`` or ``args.eval`` is set,
                since these datasets only support inference.
        """
        test_dataset = self.args.test_dataset(corpus_file=self.args.loader_path,
                                              tokenizer=self.tokenizer,
                                              max_length=self.args.input_max_length)

        # Guard after dataset construction; raising here aborts train/eval use.
        if self.args.train or self.args.eval:
            raise NotImplementedError(f'{self.args.dataset} do not support train/eval!')

        test_dataloader = DataLoader(dataset=test_dataset, batch_size=self.args.batch_size, collate_fn=collate_fn_test)
        return test_dataloader, test_dataloader, test_dataloader

    def _get_optimizer_scheduler(self):
        """Create AdamW with two param groups and the project LR scheduler.

        The ``lm_head`` parameters get their own learning rate
        (``args.lm_head_lr``); all remaining parameters use ``args.lr``.
        """
        # Set difference separates lm_head params from the rest of the model.
        all_params = set(self.model.parameters())
        lm_head_params = set(self.model.lm_head.parameters())
        params = [
            {'params': list(all_params - lm_head_params), 'lr': self.args.lr},
            {'params': list(lm_head_params), 'lr': self.args.lm_head_lr},
        ]

        optimizer = AdamW(params)
        # Scheduler scales LR using the model width (d_model) — see utils.scheduler.
        scheduler = Scheduler(self.args, optimizer, self.model.config.d_model)

        return optimizer, scheduler

    def train(self):
        """Run the full training loop with gradient accumulation.

        After each epoch, evaluates on the dev split; the checkpoint with the
        best dev key score is saved to ``output/`` and reloaded at the end,
        so the model instance ends up holding the best weights.
        """
        os.makedirs('output', exist_ok=True)
        ckpt_path = f'output/{self.args.name}_max_key_score.pt'
        # Sentinel below any real score so the first epoch always checkpoints.
        max_key_score = -1

        curr_loss = 0
        self.current_step = 0
        for epoch in range(self.args.epochs):
            progress_bar = tqdm(self.train_dataloader, desc=f'epoch {epoch}')
            for batch in progress_bar:
                loss = self.training_step(batch=batch)
                # Scale so accumulated gradients average over the micro-batches.
                loss = loss / self.args.gradient_accumulation_steps
                loss.backward()

                self.current_step = self.current_step + 1
                # Optimizer/scheduler step only every N micro-batches; a
                # trailing partial accumulation at the end is never stepped.
                if self.current_step % self.args.gradient_accumulation_steps == 0:
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                    self.scheduler.step()

                # Undo the accumulation scaling for human-readable loss logging.
                curr_loss = curr_loss + loss.item() * self.args.gradient_accumulation_steps
                # NOTE: curr_loss is only reset inside this wandb branch, so it
                # grows unbounded when wandb is disabled (harmless: unused then).
                if self.current_step % self.args.log_every_n_steps == 0 and self.args.wandb:
                    log_dict = {
                        'train/loss': curr_loss / self.args.log_every_n_steps,
                    }

                    # One LR entry per param group (base params, lm_head).
                    for idx, param_group in enumerate(self.optimizer.param_groups):
                        log_dict[f'train/lr_{idx}'] = param_group['lr']

                    wandb.log(log_dict)
                    curr_loss = 0

            key_score = self.eval(section='dev', epoch=str(epoch))
            if key_score > max_key_score:
                max_key_score = key_score
                torch.save({'state_dict': self.model.state_dict()}, ckpt_path)

        # Restore the best-dev-score weights before returning.
        self.load(ckpt_path)

    def training_step(self, batch):
        """Forward one batch and return the seq2seq LM loss tensor.

        The batch dict must provide ``input_ids``, ``attention_mask`` and
        ``labels`` tensors; they are moved to ``args.device`` here.
        """
        self.model.train()

        input_ids = batch['input_ids'].to(self.args.device)
        attention_mask = batch['attention_mask'].to(self.args.device)
        labels = batch['labels'].to(self.args.device)

        loss = self.model(input_ids=input_ids, attention_mask=attention_mask, labels=labels).loss
        return loss

    def eval(self, section, epoch=''):
        """Generate predictions for a split, score them, and return key metric.

        Args:
            section: 'test' uses the test dataloader; anything else uses dev.
            epoch: label used only in the printed header.

        Returns:
            The single key score produced by ``args.evaluator.evaluate``,
            used by train() for best-checkpoint selection.

        Side effects: writes predictions to a JSON file in the CWD and,
        when wandb is enabled, uploads the file and logs all metrics.
        """
        preds_list = list()
        golds_list = list()
        dataloader = self.test_dataloader if section == 'test' else self.eval_dataloader

        self.model.eval()
        with torch.no_grad():
            for batch in tqdm(dataloader, desc=section):
                input_ids = batch['input_ids'].to(self.args.device)
                attention_mask = batch['attention_mask'].to(self.args.device)

                # Beam search with n-gram blocking; lengths/beams from args.
                preds = self.model.generate(input_ids=input_ids,
                                            attention_mask=attention_mask,
                                            max_length=self.args.generation_max_length,
                                            min_length=1,
                                            no_repeat_ngram_size=self.args.no_repeat_ngram_size,
                                            early_stopping=True,
                                            num_beams=self.args.num_beams)
                preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True)

                preds_list.extend(preds)
                # 'golds' are reference targets supplied by the collate_fn;
                # their exact structure is defined in utils.dataset.
                golds_list.extend(batch['golds'])

        pred_save_path = f'{self.args.name}_preds_{section}.json'
        with open(pred_save_path, 'w') as file:
            json.dump(preds_list, file)

        scores, key_score = self.args.evaluator.evaluate(preds_list, golds_list, section)

        log_dict = dict()
        print(f'[Epoch {epoch}]{section}:')
        for key, value in scores.items():
            log_dict[f'{section}/{key}'] = value
            print(f'\t{key}: {value}')

        if self.args.wandb:
            wandb.save(pred_save_path)
            wandb.log(log_dict)

        return key_score

    def load(self, ckpt_path):
        """Load model weights from ``ckpt_path`` into ``self.model``.

        Falls back to CPU map_location when CUDA is unavailable, and strips
        the '_MyModel__model._DialogModel__model.' prefix from state-dict
        keys — presumably left by an older wrapper-model export; verify
        against the checkpoint producer.
        """
        if torch.cuda.is_available():
            map_location = None
        else:
            map_location = torch.device('cpu')

        # NOTE(review): torch.load without weights_only — only load trusted
        # checkpoints (pickle can execute arbitrary code).
        ckpt = torch.load(ckpt_path, map_location=map_location)['state_dict']
        state_dict = OrderedDict()

        for key, value in ckpt.items():
            new_key = key.replace('_MyModel__model._DialogModel__model.', '')
            state_dict[new_key] = value

        self.model.load_state_dict(state_dict)
        print(f'load checkpoint from {ckpt_path}')

    def traverse(self):
        """Debug helper: print the first gold example of the first test batch.

        Assumes each gold entry is a mapping with 'input_text' and
        'utterance' keys — TODO confirm against utils.dataset collate_fn.
        """
        for batch in self.test_dataloader:
            print(batch['golds'][0]['input_text'])
            print(batch['golds'][0]['utterance'])
            break
