from torch.utils.tensorboard import SummaryWriter
from core.utils import logging
from core.dataloader import TripletDataloader
from core.model.embedding.word2vec import GloveWord2Vector
from core.model.tokenizer.tokenizer import WhitespaceTokenizer
from core.model.span_level_model import SpanLevelModel
import os
import pickle
from core.utils.utils import get_time, gold_labels, log_likelihood, metrics
import torch
import datetime

class BaseRunner:
    '''Orchestrate training, validation and inference for the span-level model.

    Wires together the whitespace tokenizer, the GloVe embedder, the triplet
    dataloaders, ``SpanLevelModel`` and an AdamW optimizer, then drives the
    train / validation / inference loops while logging metrics to the console
    and to TensorBoard.

    Args:
        train: training config (``lr``, ``weight_decay``, ``accum_step``,
            ``max_iters``, ``max_epochs``, ``tensorboard_name``).
        data: data config (``batch_size``, ``dataset_root``, ``cropus_path``).
        log: logging config (``interval`` — iterations between log lines).
        inference: inference config (``infer_times`` — passes over the test set).
        is_inference: when True, ``run()`` performs inference instead of training.
        resume_path: checkpoint file to resume from, if it exists.
        save_path: directory where checkpoints are written.

    Examples:
        >>> runner = BaseRunner()
        >>> runner.run()
    '''

    def __init__(self, train: dict = None, data: dict = None, log: dict = None,
                 inference: dict = None, is_inference: bool = False,
                 resume_path: str = "./checkpoint/model.pkl",
                 save_path: str = "./checkpoint") -> None:
        SEED = 1024
        torch.manual_seed(SEED)  # ensure that the results of each random generation are consistent
        self.is_inference = is_inference

        self.tokenizer = WhitespaceTokenizer()
        # Use fresh dicts instead of mutable default arguments:
        # set_default_cfg() mutates these, so a shared `{}` default would
        # leak configuration across BaseRunner instances.
        self.train_cfg = {} if train is None else train
        self.data_cfg = {} if data is None else data
        self.log_cfg = {} if log is None else log
        self.inference_cfg = {} if inference is None else inference
        self.set_default_cfg()

        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        logging.info(f'using device:{self.device}')
        self.resume_path = resume_path
        self.save_path = save_path
        self.embedder = GloveWord2Vector(self.data_cfg.get('cropus_path', './dataset/cropus/42B_w2v.txt'))
        self.dataloader = TripletDataloader(
            self.data_cfg.get('dataset_root') + '/train_triplets.txt',
            self.data_cfg.get('batch_size', 8),
            tokenizer=self.tokenizer,
            embedder=self.embedder,
            device=self.device
            )
        self.valid_dataloader = None
        self.test_dataloader = None
        self.model = SpanLevelModel(self.embedder.glove_model.vector_size, device=self.device)
        # read lr from the normalized config (the raw `train` arg may be None)
        self.optimizer = torch.optim.AdamW(self.model.parameters(), self.train_cfg.get('lr', 1e-3))
        tensorboard_name = self.train_cfg.get('tensorboard_name', '')
        self.writer = SummaryWriter(f'runs/{tensorboard_name}')

        self.max_iters = self.train_cfg.get('max_iters', 50000)
        self.max_epochs = self.train_cfg.get('max_epochs', 5000)
        self.iter = 0
        self.epoch = 0
        self.metric = {}   # rolling metric dict; flushed (cleared) by log_metric()
        self.best_f1 = 0
        self.mode = 'train'  # used as the TensorBoard scalar prefix

        self.resume()
        self.move_to_device()

    def set_default_cfg(self):
        '''Fill in default values for any config key the caller omitted.'''
        # set default value about data config
        self.data_cfg.setdefault('batch_size', 1)
        self.data_cfg.setdefault('dataset_root', './dataset/ASTE-Data-V2-EMNLP2020/15res')

        # set default value about train config
        self.train_cfg.setdefault('lr', 1e-3)
        self.train_cfg.setdefault('weight_decay', 0.01)
        self.train_cfg.setdefault('accum_step', 2)

        # set default value about log
        self.log_cfg.setdefault('interval', 100)

        # set default value about inference
        self.inference_cfg.setdefault('infer_times', 5)

        logging.info(f'train_cfg:{self.train_cfg}')
        logging.info(f'data_cfg:{self.data_cfg}')
        logging.info(f'log_cfg:{self.log_cfg}')
        if self.is_inference:
            logging.info(f'inference_cfg:{self.inference_cfg}')

    def move_to_device(self):
        '''Move the model onto the device selected in ``__init__``.'''
        if self.device == 'cuda':
            self.model.cuda()
        else:
            self.model.cpu()

    def run(self):
        '''Entry point: dispatch to inference or training based on ``is_inference``.'''
        if self.is_inference:
            logging.info('inference mode')
            self.inference()
        else:
            logging.info('train mode')
            self.train()

    def after_epoch(self):
        '''End-of-epoch hook: checkpoint the model, then run validation.'''
        self.save_model()
        self.validation()

    @get_time('iter_time', False)
    def train_iteration(self):
        '''Run one training step: forward, loss, backward, and — every
        ``accum_step`` iterations — an optimizer update (gradient accumulation).
        '''
        # read data; a None batch marks the end of an epoch
        data = self.dataloader.next_batch_data()
        if data is None:
            self.after_epoch()
            self.epoch += 1
            self.dataloader.reset()
            data = self.dataloader.next_batch_data()
        batch_data, spans, span_labels, relations, relation_labels, sequence_length = data
        spans_probs, span_indices, relations_probs, candidate_indices = self.model(batch_data)
        batch_size, max_span_num, _ = spans_probs.size()

        # compute span (NER) and relation losses and metrics
        gold_span_indices, gold_span_labels = gold_labels(span_indices, spans, span_labels)
        loss_span = log_likelihood(spans_probs, span_indices, gold_span_indices, gold_span_labels)
        precision_ner, recall_ner, f1_ner = metrics(spans_probs,
                                                        torch.tensor(gold_span_labels, device=self.device))
        gold_relation_indices, gold_relation_labels = gold_labels(candidate_indices, relations, relation_labels)
        loss_relation = log_likelihood(relations_probs, candidate_indices, gold_relation_indices,
                                        gold_relation_labels)
        precision_relation, recall_relation, f1_relation = metrics(relations_probs,
                                                                        torch.tensor(gold_relation_labels,
                                                                                    device=self.device))
        loss = 0.2 * loss_span + loss_relation
        loss = loss / self.train_cfg.get('accum_step')

        # Guard against NaN BEFORE backward so NaN gradients never
        # accumulate into .grad (the old order ran backward first).
        if torch.any(torch.isnan(loss)):
            # fixed: was `op.path.exist` (undefined name, wrong function)
            if not os.path.exists('inputs_data.pkl'):
                inputs_data = {
                    'input_data': data,
                    'loss': loss,
                    'loss_span': loss_span,
                    'loss_relation': loss_relation
                }
                with open('inputs_data.pkl', 'wb') as f:
                    # dump the full debug payload, not just the raw batch,
                    # to match what the warning message promises
                    pickle.dump(inputs_data, f)
            logging.warning('loss is nan, please check it!They have been saved in ./inputs_data.pkl')
            return

        loss.backward()
        torch.nn.utils.clip_grad_norm_(parameters=self.model.parameters(), max_norm=4, norm_type=2)

        # accumulate running metrics, averaged over the logging interval
        log_interval = self.log_cfg.get('interval')
        self.metric['epoch'] = self.epoch
        self.metric['iter'] = self.iter
        # effective batch size seen by each optimizer step
        self.metric['batch_size'] = self.data_cfg['batch_size'] * self.train_cfg['accum_step']
        self.metric['span_P'] = self.metric.get('span_P', 0) + precision_ner / log_interval
        self.metric['span_R'] = self.metric.get('span_R', 0) + recall_ner / log_interval
        self.metric['span_F1'] = self.metric.get('span_F1', 0) + f1_ner / log_interval
        self.metric['loss_span'] = self.metric.get('loss_span', 0) + loss_span / log_interval
        self.metric['relation_P'] = self.metric.get('relation_P', 0) + precision_relation / log_interval
        self.metric['relation_R'] = self.metric.get('relation_R', 0) + recall_relation / log_interval
        self.metric['relation_F1'] = self.metric.get('relation_F1', 0) + f1_relation / log_interval
        self.metric['loss_relation'] = self.metric.get('loss_relation', 0) + loss_relation / log_interval
        self.metric['loss'] = self.metric.get('loss', 0) + loss / log_interval

        # only step the optimizer once every `accum_step` iterations
        if self.iter % self.train_cfg.get('accum_step') != 0:
            return
        self.optimizer.step()
        self.optimizer.zero_grad()

    def after_train_iteration(self):
        '''Per-iteration hook: periodically log metrics, then advance the counter.'''
        if self.iter % self.log_cfg.get('interval') == 0:
            # rough ETA from the last measured per-iteration wall time
            eta = (self.max_iters - self.iter) * self.metric.get('iter_time', 0)
            self.metric['eta'] = str(datetime.timedelta(seconds=int(eta)))
            self.add_to_tensorboard()
            self.log_metric(self.metric)

        self.iter += 1

    def after_train(self):
        '''Post-training hook: flush and close the TensorBoard writer.'''
        self.writer.close()

    def train(self):
        '''Main training loop, bounded by both ``max_iters`` and ``max_epochs``.'''
        self.model.train()
        while self.iter < self.max_iters and self.epoch < self.max_epochs:
            self.train_iteration()
            self.after_train_iteration()
        self.after_train()

    def save_model(self, is_save_best: bool = False, f1 = 0):
        '''Save a checkpoint (model weights + training counters).

        Args:
            is_save_best: when True, only write ``model_best.pkl`` if ``f1``
                is at least the best F1 seen so far.
            f1: the validation F1 used for best-model comparison.
        '''
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)
        save_dict = {}
        save_dict['model'] = self.model.state_dict()
        save_dict['epoch'] = self.epoch
        save_dict['iter'] = self.iter
        save_dict['best_f1'] = self.best_f1
        if is_save_best:
            if f1 < self.best_f1:
                return
            self.best_f1 = f1
            save_path = f'{self.save_path}/model_best.pkl'
        else:
            save_path = f'{self.save_path}/model.pkl'
        torch.save(save_dict, save_path)
        logging.info(f'save model to {save_path}')

    def resume(self):
        '''Restore model weights and counters from ``resume_path`` if present.

        Failures (e.g. an empty or corrupt checkpoint file) are logged and
        otherwise ignored, leaving the freshly-initialized state in place.
        '''
        if os.path.exists(self.resume_path):
            try:
                resume_dict = torch.load(self.resume_path)
                self.model.load_state_dict(resume_dict['model'])
                self.epoch = resume_dict.get('epoch', 0)
                self.iter = resume_dict.get('iter', 0)
                self.best_f1 = resume_dict.get('best_f1', 0)
                logging.info(f'resume model from {self.resume_path} successfuly!')
            except Exception:
                # narrowed from a bare `except:` so KeyboardInterrupt etc. propagate
                logging.info(f'resume failed!it is possible that the file to load is empty!')

    @torch.no_grad()
    def validation(self):
        '''Evaluate on the dev set, log averaged metrics, and save the best model.'''
        self.mode = 'val'
        self.model.eval()
        valid_data_path = self.data_cfg.get('dataset_root') + '/dev_triplets.txt'
        self.valid_dataloader = TripletDataloader(
            valid_data_path,
            self.data_cfg.get('batch_size', 1),
            tokenizer=self.tokenizer,
            embedder=self.embedder,
            device=self.device
            )
        loss_total = 0
        P_relation_total = 0
        R_relation_total = 0
        F1_realtion_total = 0
        loss_relation_total = 0
        P_span_total = 0
        R_span_total = 0
        F1_span_total = 0
        loss_span_total = 0
        count = 0
        while True:
            data = self.valid_dataloader.next_batch_data()
            if data is None:
                break
            batch_data, spans, span_labels, relations, relation_labels, sequence_length = data
            spans_probs, span_indices, relations_probs, candidate_indices = self.model(batch_data)
            batch_size, max_span_num, _ = spans_probs.size()

            # compute span and relation losses/metrics (no backward in eval)
            gold_span_indices, gold_span_labels = gold_labels(span_indices, spans, span_labels)
            # NOTE(review): from code and observed runs, gold_span_indices
            # appears identical to span_indices — confirm before simplifying.
            loss_span = log_likelihood(spans_probs, span_indices, gold_span_indices, gold_span_labels)
            precision_ner, recall_ner, f1_ner = metrics(spans_probs,
                                                            torch.tensor(gold_span_labels, device=self.device))
            gold_relation_indices, gold_relation_labels = gold_labels(candidate_indices, relations, relation_labels)
            loss_relation = log_likelihood(relations_probs, candidate_indices, gold_relation_indices,
                                            gold_relation_labels)
            precision_relation, recall_relation, f1_relation = metrics(relations_probs,
                                                                            torch.tensor(gold_relation_labels,
                                                                                        device=self.device))
            loss = 0.2 * loss_span + loss_relation

            # accumulate totals for averaging after the loop
            loss_total += loss
            P_span_total += precision_ner
            R_span_total += recall_ner
            F1_span_total += f1_ner
            loss_span_total += loss_span
            P_relation_total += precision_relation
            R_relation_total += recall_relation
            F1_realtion_total += f1_relation
            loss_relation_total += loss_relation
            count += 1

        self.metric.clear()
        self.metric['mode'] = 'val'
        self.metric['span_P'] = P_span_total / count
        self.metric['span_R'] = R_span_total / count
        self.metric['span_F1'] = F1_span_total / count
        self.metric['loss_span'] = loss_span_total / count
        self.metric['relation_P'] = P_relation_total / count
        self.metric['relation_R'] = R_relation_total / count
        eps = 1e-6
        # harmonic mean of the averaged P/R (eps avoids division by zero)
        self.metric['relation_F1'] = 2 * self.metric['relation_P'] * self.metric['relation_R'] / (self.metric['relation_P'] + self.metric['relation_R'] + eps)
        self.metric['loss_relation'] = loss_relation_total / count
        self.metric['loss'] = loss_total / count
        self.add_to_tensorboard()
        self.log_metric(self.metric)
        self.save_model(is_save_best=True, f1=F1_realtion_total / count)

        self.mode = 'train'
        self.model.train()

    @torch.no_grad()
    def inference(self):
        '''Evaluate on the test set, averaging over ``infer_times`` full passes.'''
        self.model.eval()
        self.test_dataloader = TripletDataloader(
            self.data_cfg.get('dataset_root') + '/test_triplets.txt',
            self.data_cfg.get('batch_size', 1),
            tokenizer=self.tokenizer,
            embedder=self.embedder,
            device=self.device
            )
        loss_total = 0
        P_relation_total = 0
        R_relation_total = 0
        F1_realtion_total = 0
        loss_relation_total = 0
        P_span_total = 0
        R_span_total = 0
        F1_span_total = 0
        loss_span_total = 0
        count = 0
        infer_times = self.inference_cfg.get('infer_times')
        while infer_times:
            data = self.test_dataloader.next_batch_data()
            if data is None:
                # One pass over the test set finished: rewind and count it.
                # `continue` is essential — without it the previous batch was
                # processed twice (and the very first iteration could hit an
                # undefined `batch_data` on an empty file).
                self.test_dataloader.reset()
                infer_times -= 1
                continue
            batch_data, spans, span_labels, relations, relation_labels, sequence_length = data
            spans_probs, span_indices, relations_probs, candidate_indices = self.model(batch_data)
            batch_size, max_span_num, _ = spans_probs.size()

            # compute span and relation losses/metrics (no backward in eval)
            gold_span_indices, gold_span_labels = gold_labels(span_indices, spans, span_labels)
            # NOTE(review): from code and observed runs, gold_span_indices
            # appears identical to span_indices — confirm before simplifying.
            loss_span = log_likelihood(spans_probs, span_indices, gold_span_indices, gold_span_labels)
            precision_ner, recall_ner, f1_ner = metrics(spans_probs,
                                                            torch.tensor(gold_span_labels, device=self.device))
            gold_relation_indices, gold_relation_labels = gold_labels(candidate_indices, relations, relation_labels)
            loss_relation = log_likelihood(relations_probs, candidate_indices, gold_relation_indices,
                                            gold_relation_labels)
            precision_relation, recall_relation, f1_relation = metrics(relations_probs,
                                                                           torch.tensor(gold_relation_labels,
                                                                                        device=self.device))

            loss_total += 0.2 * loss_span + loss_relation
            P_span_total += precision_ner
            R_span_total += recall_ner
            F1_span_total += f1_ner
            loss_span_total += loss_span
            P_relation_total += precision_relation
            R_relation_total += recall_relation
            F1_realtion_total += f1_relation
            loss_relation_total += loss_relation
            count += 1

        self.metric['infer_times'] = self.inference_cfg.get('infer_times')
        self.metric['trained_epochs'] = self.epoch
        self.metric['span_P'] = P_span_total / count
        self.metric['span_R'] = R_span_total / count
        self.metric['span_F1'] = F1_span_total / count
        self.metric['loss_span'] = loss_span_total / count
        self.metric['relation_P'] = P_relation_total / count
        self.metric['relation_R'] = R_relation_total / count
        eps = 1e-6
        # harmonic mean of the averaged P/R (eps avoids division by zero)
        self.metric['relation_F1'] = 2 * self.metric['relation_P'] * self.metric['relation_R'] / (self.metric['relation_P'] + self.metric['relation_R'] + eps)
        self.metric['loss_relation'] = loss_relation_total / count
        self.metric['loss'] = loss_total / count
        self.log_metric(self.metric)

    def log_metric(self, metric: dict):
        '''Log every entry of ``metric`` on one line, then clear the dict.

        int/str values are printed verbatim; everything else is formatted to
        four decimal places. Values listed in ``highlight_values`` (e.g. the
        'val' mode tag) are rendered in red via ANSI escapes.
        '''
        log_str = ""
        highlight_values = ['val']
        for name, value in metric.items():
            tmp_str = ''
            if isinstance(value, (int, str)):
                tmp_str = f"{name}:{value}, "
            else:
                tmp_str = "{}:{:.4f}, ".format(name, value)
            if value in highlight_values:
                tmp_str = f"\033[0;31m{tmp_str}\033[0m"
            log_str += tmp_str
        log_str = log_str[:-2]  # drop the trailing ", "
        logging.info(log_str)
        metric.clear()

    def add_to_tensorboard(self):
        '''Write every numeric metric to TensorBoard, tagged with the current mode.'''
        skip_name = ['mode', 'eta']
        for name, value in self.metric.items():
            if name in skip_name or isinstance(value, str):
                continue
            self.writer.add_scalar(f"{self.mode} {name}", value, self.iter)
