import os
import json
import argparse
from tqdm import tqdm

import utils.tool
from utils.dataset import collate_fn_test
from metrics.reddit.evaluator import EvaluateTool

import wandb
import torch
from torch.utils.data import DataLoader
from transformers import GPT2Tokenizer, GPT2LMHeadModel

def _build_parser():
    """Build the command-line parser for DialoGPT test-set evaluation."""
    p = argparse.ArgumentParser()

    # Model / cache locations.
    p.add_argument('--cache_dir', type=str, default=os.path.join('output', 'cache'))
    p.add_argument('--forward_model_dir', '-fmd', type=str,
                   default='/Users/weiyun/Projects/parameters/DialoGPT')
    p.add_argument('--backward_model_dir', '-bmd', type=str,
                   default='/Users/weiyun/Projects/parameters/DialoGPT')

    # Data selection and decoding hyper-parameters.
    p.add_argument('--dataset', type=str, default='reddit', choices=['reddit', 'dstc7'])
    p.add_argument('--batch_size', type=int, default=1)
    p.add_argument('--input_max_length', type=int, default=128)
    p.add_argument('--generation_max_length', type=int, default=1000)
    p.add_argument('--num_beams', type=int, default=1)

    # Weights & Biases logging: enabled by default, disabled via --no-wandb.
    p.set_defaults(wandb=True)
    p.add_argument('--no-wandb', dest='wandb', action='store_false')
    p.add_argument('--project', type=str, default='DialogT5_downstream')
    p.add_argument('--entity', type=str, default='weiyun')
    return p


parser = _build_parser()


class Processor:
    """Generates DialoGPT responses for a test corpus and scores them.

    Expects ``args`` to carry (set up in ``main()``): ``device``, ``name``,
    ``loader_path``, ``test_dataset`` (a dataset class), plus the CLI options.
    """

    def __init__(self, args):
        self.args = args

        # Forward (context -> response) model; a backward model directory is
        # accepted on the CLI but not loaded here.
        self.tokenizer = GPT2Tokenizer.from_pretrained(args.forward_model_dir)
        self.forward_model = GPT2LMHeadModel.from_pretrained(args.forward_model_dir).to(args.device)

        self.test_dataloader = self._get_test_dataloader()
        self.evaluator = EvaluateTool(args)

        # Either log the run to wandb or just echo the configuration.
        if args.wandb:
            wandb.init(project=args.project, name=args.name, entity=args.entity, config=args)
        else:
            print('args:')
            for key, value in vars(args).items():
                print(f'\t{key}: {value}')

    def _get_test_dataloader(self):
        """Build the test DataLoader from the dataset class chosen in main()."""
        dataset = self.args.test_dataset(
            corpus_file=self.args.loader_path,
            tokenizer=self.tokenizer,
            max_length=self.args.input_max_length,
        )
        return DataLoader(dataset=dataset,
                          batch_size=self.args.batch_size,
                          collate_fn=collate_fn_test)

    def eval(self, section='test'):
        """Decode every test batch, dump predictions to JSON, and report scores."""
        self.forward_model.eval()

        predictions = []
        references = []
        with torch.no_grad():
            for batch in tqdm(self.test_dataloader):
                input_ids = batch['input_ids'].to(self.args.device)
                attention_mask = batch['attention_mask'].to(self.args.device)

                outputs = self.forward_model.generate(input_ids=input_ids,
                                                      attention_mask=attention_mask,
                                                      pad_token_id=self.tokenizer.eos_token_id,
                                                      max_length=self.args.generation_max_length,
                                                      num_beams=self.args.num_beams)
                # Strip the prompt tokens so only the generated continuation remains.
                continuations = outputs[:, input_ids.shape[1]:]
                predictions.extend(self.tokenizer.batch_decode(continuations, skip_special_tokens=True))
                references.extend(batch['golds'])

        pred_save_path = f'dialogpt_{self.args.name}_preds_{section}.json'
        with open(pred_save_path, 'w') as file:
            json.dump(predictions, file)

        scores, _ = self.evaluator.evaluate(predictions, references, section)
        print(scores)
        if self.args.wandb:
            wandb.save(pred_save_path)

            for key, value in scores.items():
                wandb.log({f'{section}/{key}': value})


def main():
    """Parse CLI options, derive run-specific settings, and run evaluation."""
    args = parser.parse_args()

    # Run name and data paths are derived from the chosen dataset / beam width.
    args.name = f'DialoGPT_{args.dataset}_{args.num_beams}'
    args.loader_path = f'./data/{args.dataset}/test.refs.txt'
    args.test_dataset = utils.tool.get_test_dataset(f'seq2seq_construction.{args.dataset}')

    args.device = 'cuda' if torch.cuda.is_available() else 'cpu'

    Processor(args).eval()


# Script entry point: run the evaluation only when executed directly.
if __name__ == '__main__':
    main()
