import os
import json
import logging
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
import torch.nn.parallel as para
from tqdm import trange, tqdm
import random
from cpg.vocab import VocabEntry
from cpg.cpg_model import LSTMModel, TransformerModel

# file-name template for dumped eval metrics: cpg_eval.<data_type>.<model_name>.<epoch>.json
# (filled in resume_save_eval_at with the split name, the model name and the epoch index)
eval_dump_template = 'cpg_eval.{}.{}.{}.json'
class CPGTaskSetting(object):
    """Configuration holder for a CPG task.

    `base_key_attrs` are mandatory directory paths that must be passed as
    keyword arguments; every entry of `attr_default_pairs` becomes an
    attribute with its default, overridable via kwargs or `update_by_dict`.
    """
    # required keyword arguments (no defaults)
    base_key_attrs = ['data_dir', 'model_dir', 'output_dir']
    # (attribute name, default value) pairs
    attr_default_pairs = [
        ('train_file_name', 'train.json'),
        ('dev_file_name', 'dev.json'),
        ('test_file_name', 'test.json'),
        ('vocab_file_name', 'vocab.json'),
        ('save_cpt_flag', True),
        ('resume_latest_cpt', True),
        ('max_sent_len', 50),
        ('train_batch_size', 128),
        ('gradient_accumulation_steps', 1),
        ('eval_batch_size', 32),
        ('learning_rate', 1e-4),
        ('num_train_epochs', 100),
        ('seed', 0),
        ('no_cuda', False),
        # model parameters
        ('model_name', 'Transfm'),
        ('trainable_pe', False),
        ('format', 5),
        ('init_state_param', False),
        ('embed_size', 512),
        ('hidden_size', 512),
        ('layer_num', 2),
        ('ff_size', 1024),      # only available to the transformer model
        ('dropout', 0.2)
    ]

    def __init__(self, **kwargs):
        """Set the required directories from kwargs (KeyError if one is
        missing), then apply defaults overridden by matching kwargs."""
        for key in CPGTaskSetting.base_key_attrs:
            setattr(self, key, kwargs[key])

        for attr, val in CPGTaskSetting.attr_default_pairs:
            setattr(self, attr, kwargs.get(attr, val))

    def update_by_dict(self, attr_dict):
        """Overwrite known setting attributes from `attr_dict`.

        Unknown keys are silently ignored, so the required directory
        attributes cannot be changed through this method.
        """
        allowed_attr = {p[0] for p in CPGTaskSetting.attr_default_pairs}
        for attr, val in attr_dict.items():
            if attr in allowed_attr:
                setattr(self, attr, val)

    def resume_setting_from(self, task_dir):
        """Load 'task_setting.json' from `task_dir` and merge it in."""
        setting_file = os.path.join(task_dir, 'task_setting.json')
        # fix: context manager so the file handle is closed promptly
        with open(setting_file, 'r') as fin:
            setting_dict = json.load(fin)

        self.update_by_dict(setting_dict)

    def dump_to(self, dir_path, file_name='task_setting.json'):
        """Serialize all current attributes as JSON to dir_path/file_name."""
        dump_fp = os.path.join(dir_path, file_name)
        # fix: context manager so the data is flushed and the handle closed
        with open(dump_fp, 'w') as fout:
            json.dump(self.__dict__, fout)
        
        
class CPGTask(object):
    """Chinese Poetry Generation task.

    Owns the vocabulary, datasets, model and optimizer, and drives the
    training / evaluation / checkpointing life cycle described by a
    CPGTaskSetting.
    """

    def __init__(self, cpg_setting, load_train=True, load_dev_test=True):
        """Initialize device, random seed, vocab, data, model and optimizer.

        Args:
            cpg_setting: CPGTaskSetting with directories and hyper-parameters.
            load_train: whether to load the training split.
            load_dev_test: whether to load the dev and test splits.

        Raises:
            Exception: for an unsupported `model_name` setting.
        """
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logging('Initializing {}'.format(self.__class__.__name__))
        self.setting = cpg_setting

        # basic task settings
        self._check_setting_validity()
        self._init_device()
        self.reset_random_seed()

        # initialize the vocab
        word2id = self.load_vocab_word2id(self.setting.vocab_file_name)
        self.vocab = VocabEntry(word2id)

        # load data
        self._load_data(load_train, load_dev_test)

        # build the model requested by the settings
        if self.setting.model_name == 'LSTM':
            self.model = LSTMModel(self.setting, self.vocab, self.device)
        elif self.setting.model_name == 'Transfm':
            self.model = TransformerModel(self.setting, self.vocab, self.device)
        else:
            raise Exception('Unsupported model type {}'.format(self.setting.model_name))

        self.model.to(self.device)

        # prepare optimizer
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.setting.learning_rate)

        self.logging('Successfully initialize {}'.format(self.__class__.__name__))

    def train(self, save_cpt_flag=True, resume_base_epoch=None):
        """Run the full training loop, optionally resuming from a checkpoint.

        Args:
            save_cpt_flag: whether to save a checkpoint after every epoch.
            resume_base_epoch: epoch to resume from; None means decide from
                the `resume_latest_cpt` setting.
        """
        self.logging('=' * 20 + 'Start Training' + '=' * 20)

        # resume_base_epoch argument has higher priority over settings
        if resume_base_epoch is None:
            # whether to resume latest cpt when restarting, very useful for preemptive scheduling clusters
            if self.setting.resume_latest_cpt:
                resume_base_epoch = self.get_latest_cpt_epoch()
            else:
                resume_base_epoch = 0

        # resume cpt if possible
        if resume_base_epoch > 0:
            self.logging('Training starts from epoch {}'.format(resume_base_epoch))
            self.resume_cpt_at(resume_base_epoch, resume_model=True, resume_optimizer=True)
        else:
            self.logging('Training starts from scratch')

        self.base_train(
            get_loss_func=self.model.get_loss_on_batch,
            kwargs_dict1={},
            epoch_eval_func=self.resume_save_eval_at,
            # fix: save_cpt_flag used to be accepted but silently ignored;
            # forward it to the per-epoch eval hook
            kwargs_dict2={'save_cpt_flag': save_cpt_flag},
            base_epoch_idx=resume_base_epoch,
        )

    def base_train(self, get_loss_func, kwargs_dict1=None,
                   epoch_eval_func=None, kwargs_dict2=None, base_epoch_idx=0):
        """Generic epoch/step training loop.

        Args:
            get_loss_func: callable(batch, **kwargs_dict1) -> scalar loss tensor.
            kwargs_dict1: extra kwargs for get_loss_func (default: empty).
            epoch_eval_func: optional callable(epoch, **kwargs_dict2) invoked
                after each epoch.
            kwargs_dict2: extra kwargs for epoch_eval_func (default: empty).
            base_epoch_idx: first epoch index (non-zero when resuming).
        """
        assert self.model is not None
        # fix: avoid mutable default arguments shared across calls
        kwargs_dict1 = {} if kwargs_dict1 is None else kwargs_dict1
        kwargs_dict2 = {} if kwargs_dict2 is None else kwargs_dict2

        self.num_train_steps = round(
            self.setting.num_train_epochs * len(self.train_dataset) / self.setting.train_batch_size
        )

        self.logging('=' * 20 + 'Start Base Training' + '=' * 20)
        self.logging("\tTotal sentences Num = {}".format(len(self.train_dataset)))
        self.logging("\tBatch size = {}".format(self.setting.train_batch_size))
        self.logging("\tNum steps = {}".format(self.num_train_steps))

        # prepare a loader over shuffled, format-homogeneous batch indices
        train_indices = self.prepare_indices(self.train_dataset, self.setting.train_batch_size, rand_flag=True)
        train_dataloader = self.prepare_data_loader(
            train_indices, self.setting.train_batch_size
        )

        global_step = 0

        self.logging('Reach the epoch beginning')
        for epoch_idx in trange(base_epoch_idx, int(self.setting.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0

            # enter train mode
            self.model.train()

            for step, batch in enumerate(tqdm(train_dataloader, desc='Iteration')):
                batch = self.set_batch_to_device(batch)
                # map the index tensor to the actual (input, target) poem pairs
                batch = [self.train_dataset[b_idx.item()] for b_idx in batch]

                # forward
                loss = get_loss_func(batch, **kwargs_dict1)

                if self.setting.gradient_accumulation_steps > 1:
                    loss = loss / self.setting.gradient_accumulation_steps

                # backward
                loss.backward()

                tr_loss += loss.item()

                nb_tr_examples += self.setting.train_batch_size  # may not be very accurate due to incomplete batch
                nb_tr_steps += 1
                if (step + 1) % self.setting.gradient_accumulation_steps == 0:
                    self.optimizer.step()
                    self.model.zero_grad()
                    global_step += 1

            if epoch_eval_func is not None:
                epoch_eval_func(epoch_idx + 1, **kwargs_dict2)

    def resume_save_eval_at(self, epoch, resume_cpt_flag=False, save_cpt_flag=True):
        """Optionally resume/save the checkpoint at `epoch`, then evaluate on
        the dev and test splits, dumping the metrics under the output dir."""
        if resume_cpt_flag:
            self.resume_cpt_at(epoch)

        if save_cpt_flag:
            self.save_cpt_at(epoch)

        for data_type in ('dev', 'test'):
            dataset = self.dev_dataset if data_type == 'dev' else self.test_dataset
            eval_dump_name = eval_dump_template.format(data_type, self.setting.model_name, epoch)
            self.eval(dataset, self.model.get_eval_on_batch, dump_eval_json_name=eval_dump_name)

    def eval(self, dataset, get_eval_on_batch, dump_eval_json_name=None, **func_kwargs):
        """Evaluate `get_eval_on_batch` over `dataset` and reduce the metrics.

        Args:
            dataset: list of (input, target) poem pairs.
            get_eval_on_batch: callable(batch, **func_kwargs) returning a dict
                of metrics that must contain a 'perplexity' entry.
            dump_eval_json_name: if given, reduced metrics are written as JSON
                under the output dir with this file name.
        """
        self.logging('=' * 20 + 'Start Evaluation' + '=' * 20)

        self.logging("\tNum examples = {}".format(len(dataset)))
        self.logging("\tBatch size = {}".format(self.setting.eval_batch_size))

        # prepare data loader (sequential: rand_flag=False)
        eval_indices = self.prepare_indices(dataset, self.setting.eval_batch_size, rand_flag=False)
        eval_dataloader = self.prepare_data_loader(
            eval_indices, self.setting.eval_batch_size
        )

        # enter eval mode
        total_info = []
        if self.model is not None:
            self.model.eval()

        for step, batch in enumerate(tqdm(eval_dataloader, desc='Iteration')):
            batch = self.set_batch_to_device(batch)
            batch = [dataset[b_idx.item()] for b_idx in batch]
            with torch.no_grad():
                batch_info = get_eval_on_batch(batch, **func_kwargs)
            # each call should return a dict with metrics
            total_info.append(batch_info)

        # TODO: need a generic reducer for better code generalization
        reduced_info = {}
        perps = [info['perplexity'] for info in total_info]
        reduced_info['perplexity'] = sum(perps) / len(perps)
        # NOTE(review): 'evalute_diversity' looks like a typo for
        # 'evaluate_diversity', but the name must match the model's API
        for k, v in self.model.evalute_diversity().items():
            reduced_info[k] = v
        if dump_eval_json_name is not None:
            dump_eval_json_path = os.path.join(self.setting.output_dir, dump_eval_json_name)
            self.logging('Dumping eval results into {}'.format(dump_eval_json_name))
            # fix: context manager so the handle is closed and data flushed
            with open(dump_eval_json_path, 'w') as fout:
                json.dump(reduced_info, fout)

    def prepare_indices(self, dataset, batch_size, rand_flag=False):
        """Build format-homogeneous full batches of dataset indices.

        Poems are split into the 7-char group (input length 33) and the
        5-char group (input length 25); each group is padded with randomly
        re-sampled members up to a multiple of batch_size so every batch is
        complete and contains a single poem format.

        Args:
            dataset: list of (input, target) poem pairs.
            batch_size: number of poems per batch.
            rand_flag: shuffle the order of the batches (not their contents).

        Returns:
            A flat int torch.Tensor of dataset indices, batch by batch.
        """
        seven_indices = [idx for idx, poem in enumerate(dataset) if len(poem[0]) == 33]
        five_indices = [idx for idx, poem in enumerate(dataset) if len(poem[0]) == 25]
        # fix: pad with the COMPLEMENT of the remainder; the old code used
        # `len % batch_size` items, which still left an incomplete batch.
        # Also skip empty groups, on which np.random.choice would raise.
        for indices in (seven_indices, five_indices):
            pad_num = (-len(indices)) % batch_size
            if indices and pad_num:
                indices.extend(np.random.choice(indices, size=pad_num).tolist())
        seven_packages = [seven_indices[i:i + batch_size]
                          for i in range(0, len(seven_indices), batch_size)]
        five_packages = [five_indices[i:i + batch_size]
                         for i in range(0, len(five_indices), batch_size)]
        indices_package = seven_packages + five_packages
        if rand_flag:
            random.shuffle(indices_package)
        return torch.Tensor([idx for package in indices_package for idx in package]).int()

    def set_batch_to_device(self, batch):
        """Move a mini-batch tensor to the task device and return it.

        fix: the original only rebound the local name, so the caller never
        received the moved tensor; the (possibly moved) batch is now returned.
        """
        if isinstance(batch, torch.Tensor):
            batch = batch.to(self.device)
        return batch

    def prepare_data_loader(self, dataset, batch_size, rand_flag=False):
        """Wrap `dataset` (here: a pre-batched index tensor) in a DataLoader.

        rand_flag must stay False for index tensors from prepare_indices,
        otherwise the format-homogeneous batch grouping would be destroyed.
        """
        return DataLoader(dataset,
                          batch_size=batch_size,
                          shuffle=rand_flag)

    def _load_data(self, load_train, load_dev_test):
        """Load the train and/or dev+test splits named in the settings."""
        self.logging('=' * 20 + 'Load Task Data' + '=' * 20)
        # prepare data
        if load_train:
            self.logging('Load train set')
            self.train_dataset = self.load_example_dataset(
                file_name=self.setting.train_file_name
            )
        else:
            self.logging('Do not load train data')
        if load_dev_test:
            self.logging('Load dev set')
            self.dev_dataset = self.load_example_dataset(
                file_name=self.setting.dev_file_name
            )
            self.logging('Load test set')
            self.test_dataset = self.load_example_dataset(
                file_name=self.setting.test_file_name
            )
        else:
            self.logging('Do not load dev and test set')

    def load_example_dataset(self, file_name):
        """Load a poem JSON file and build (input, target) sequence pairs.

        Each sentence is a 'title|body|...' string; the length of the second
        '|' field decides the format: 5-char lines get <f>/</f> wrappers,
        7-char lines get <s>/</s>. Pairs are (sent[:-1], sent[1:]) for
        next-token prediction.

        Raises:
            Exception: if the file is missing or a poem has an unexpected
                line length.
        """
        data_fp = os.path.join(self.setting.data_dir, file_name)
        if not os.path.exists(data_fp):
            raise Exception(f"File not exist: {data_fp}")

        # fix: context manager so the file handle is closed promptly
        with open(data_fp, 'r') as fin:
            data = json.load(fin)

        # actually we don't really want to truncate a poetry,
        # just to check if there is any invalid poetry (too long)
        truncated = 0
        truncated_data = []
        for sent in data:
            if len(sent) > self.setting.max_sent_len:
                sent = sent[:self.setting.max_sent_len]
                truncated += 1
            if len(sent.split('|')[1]) == 5:
                start_token = ['<f>']
                end_token = ['</f>']
            elif len(sent.split('|')[1]) == 7:
                start_token = ['<s>']
                end_token = ['</s>']
            else:
                raise Exception("data len error {}".format(sent))
            truncated_data.append(start_token + list(sent) + end_token)
        train_data = [(sent[:-1], sent[1:]) for sent in truncated_data]

        self.logging(f'{len(train_data)} sentences in total. Truncated: {truncated}.')
        return train_data

    def logging(self, msg, level=logging.INFO):
        """Log `msg` through this task's logger at the given level."""
        self.logger.log(level, msg)

    def _init_device(self):
        """Pick the cpu/cuda device from settings and hardware availability."""
        self.logging('=' * 20 + 'Init Device' + '=' * 20)
        # set device
        if self.setting.no_cuda:
            self.device = torch.device("cpu")
        else:
            # no_cuda already handled above, so only availability matters here
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.logging("device {}".format(self.device))

    def reset_random_seed(self, seed=None):
        """Seed the python / numpy / torch RNGs (defaults to setting.seed)."""
        if seed is None:
            seed = self.setting.seed
        self.logging('=' * 20 + 'Reset Random Seed to {}'.format(seed) + '=' * 20)
        # set random seeds
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)

    def load_vocab_word2id(self, vocab_file_name):
        """Load the word-to-id mapping from <data_dir>/<vocab_file_name>.

        Note: the vocab.json must exist in the data_dir.
        """
        vocab_fp = os.path.join(self.setting.data_dir, vocab_file_name)
        try:
            # fix: context manager + narrowed exception (was a bare except)
            with open(vocab_fp, 'r') as fin:
                vocab = json.load(fin)
        except (OSError, ValueError) as e:
            raise Exception(
                f"No vocab file in {vocab_fp}, please check the file name or run vocab.py first."
            ) from e
        return vocab['sent_word2id']

    def _check_setting_validity(self):
        """Validate settings, derive the per-step batch size and make sure
        the output and model directories exist."""
        self.logging('=' * 20 + 'Check Setting Validity' + '=' * 20)
        self.logging('Setting: {}'.format(
            json.dumps(self.setting.__dict__, ensure_ascii=False, indent=2)
        ))

        # check valid grad accumulate step
        if self.setting.gradient_accumulation_steps < 1:
            raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                self.setting.gradient_accumulation_steps))
        # reset train batch size: per-step batch = effective batch / accumulation steps
        self.setting.train_batch_size = int(self.setting.train_batch_size
                                            / self.setting.gradient_accumulation_steps)

        # fix: validate format BEFORE creating any directory, so an invalid
        # setting does not leave half-initialized dirs behind
        if self.setting.format not in [5, 7]:
            raise ValueError("Invalid format parameter: {}, should be 5 or 7".format(
                self.setting.format))

        # check output dir
        if os.path.exists(self.setting.output_dir) and os.listdir(self.setting.output_dir):
            self.logging("Output directory ({}) already exists and is not empty.".format(self.setting.output_dir),
                         level=logging.WARNING)
        os.makedirs(self.setting.output_dir, exist_ok=True)

        # check model dir
        if os.path.exists(self.setting.model_dir) and os.listdir(self.setting.model_dir):
            self.logging("Model directory ({}) already exists and is not empty.".format(self.setting.model_dir),
                         level=logging.WARNING)
        os.makedirs(self.setting.model_dir, exist_ok=True)

    # ------------------------------------------------------------------
    # Save and load models (also works for DataParallel / DDP wrappers).
    # ------------------------------------------------------------------
    def save_cpt_at(self, epoch):
        """Dump setting, model and optimizer state into
        <model_dir>/<model_name>.cpt.<epoch>."""
        self.logging('=' * 20 + 'Dump Checkpoint' + '=' * 20)

        cpt_file_name = '{}.cpt.{}'.format(self.setting.model_name, epoch)
        cpt_file_path = os.path.join(self.setting.model_dir, cpt_file_name)
        self.logging('Dump checkpoint into {}'.format(cpt_file_path))

        store_dict = {
            'setting': self.setting.__dict__,
        }

        if self.model:
            # unwrap (Distributed)DataParallel so the raw module state is saved
            if isinstance(self.model, (para.DataParallel, para.DistributedDataParallel)):
                model_state = self.model.module.state_dict()
            else:
                model_state = self.model.state_dict()
            store_dict['model_state'] = model_state
        else:
            self.logging('No model state is dumped', level=logging.WARNING)

        if self.optimizer:
            store_dict['optimizer_state'] = self.optimizer.state_dict()
        else:
            self.logging('No optimizer state is dumped', level=logging.WARNING)

        if epoch:
            store_dict['epoch'] = epoch

        torch.save(store_dict, cpt_file_path)

    def resume_cpt_at(self, epoch, resume_model=True, resume_optimizer=False):
        """Load checkpoint '<model_name>.cpt.<epoch>' if it exists.

        Args:
            epoch: epoch number of the checkpoint to load.
            resume_model: restore the model weights.
            resume_optimizer: restore the optimizer state.
        """
        cpt_file_name = '{}.cpt.{}'.format(self.setting.model_name, epoch)
        cpt_file_path = os.path.join(self.setting.model_dir, cpt_file_name)

        self.logging('=' * 20 + 'Resume Checkpoint' + '=' * 20)

        if os.path.exists(cpt_file_path):
            self.logging('Resume checkpoint from {}'.format(cpt_file_path))
        else:
            self.logging('Checkpoint does not exist, {}'.format(cpt_file_path), level=logging.WARNING)
            return

        # map to cpu when no GPU is visible so GPU-saved checkpoints still load
        if torch.cuda.device_count() == 0:
            store_dict = torch.load(cpt_file_path, map_location='cpu')
        else:
            store_dict = torch.load(cpt_file_path, map_location=self.device)

        self.logging('Setting: {}'.format(
            json.dumps(store_dict['setting'], ensure_ascii=False, indent=2)
        ))

        if resume_model:
            if self.model and 'model_state' in store_dict:
                # unwrap (Distributed)DataParallel before loading the state
                if isinstance(self.model, (para.DataParallel, para.DistributedDataParallel)):
                    self.model.module.load_state_dict(store_dict['model_state'])
                else:
                    self.model.load_state_dict(store_dict['model_state'])
                self.logging('Resume model successfully')
        else:
            self.logging('Do not resume model')

        if resume_optimizer:
            if self.optimizer and 'optimizer_state' in store_dict:
                self.optimizer.load_state_dict(store_dict['optimizer_state'])
                self.logging('Resume optimizer successfully')
        else:
            self.logging('Do not resume optimizer')

    def get_latest_cpt_epoch(self):
        """Return the largest epoch among '<model_name>.cpt.<epoch>' files in
        model_dir, or 0 when no checkpoint is found."""
        prev_epochs = []
        cpt_prefix = '{}.cpt'.format(self.setting.model_name)
        for fn in os.listdir(self.setting.model_dir):
            if fn.startswith(cpt_prefix):
                try:
                    prev_epochs.append(int(fn.split('.')[-1]))
                except ValueError:
                    # fix: only swallow the expected parse failure (was
                    # `except Exception`, which could hide real bugs)
                    continue
        prev_epochs.sort()

        if prev_epochs:
            latest_epoch = prev_epochs[-1]
            self.logging('Pick latest epoch {} from {}'.format(latest_epoch, str(prev_epochs)))
        else:
            latest_epoch = 0
            self.logging('No previous epoch checkpoints, just start from scratch')

        return latest_epoch
