from abc import ABC, abstractclassmethod, abstractmethod
import os

import numpy as np
import torch

from utils.consts import LOG_PATH
from utils.logger import Logger
from utils.msger import LarkMsger
from utils.tools import get_timestamp, get_device


# A wide run of spaces printed with end='\r' to wipe the previous in-place
# console status line before the next one is written (see Trainer.send_msg).
BLANK_LINE = ' ' * 130
# Number of validation checkpoints scheduled per training epoch
# (see ClasificationTrainer.prepare).
# NOTE(review): spelled "INTERVEL" (sic, "interval") consistently across this
# file; kept as-is because other modules may reference the name.
INTERVEL = 5


class Trainer(ABC):
    """Abstract skeleton of a training loop.

    Subclasses implement ``prepare`` (build model/loaders/optimizer),
    ``t_loader`` (produce training batches) and ``training_process``
    (one optimization step); ``train`` drives the epoch/step loop and
    reports progress through a Lark messenger.
    """

    def __init__(self, task_id: str, conf: dict, title_icon: str) -> None:
        """Read common hyper-parameters and build logging/messaging helpers.

        ``conf`` must provide the keys 'batch', 'epoch', 'distr', 'lr'
        and 'dataset'.
        """
        self.task_id = task_id
        self.batch = conf['batch']
        self.epoch = conf['epoch']
        self.distr = conf['distr']
        self.lr = conf['lr']
        self.dataset_name = conf['dataset']
        self.msger = LarkMsger(title_icon+task_id.upper())
        self.device = torch.device(get_device())
        self.logger = Logger(task_id)

    def train(self):
        """Run ``prepare`` then iterate ``self.epoch`` epochs (1-based)."""
        self.start_time = get_timestamp()
        self.prepare()
        self.msger.send_msg("START TRAIN")
        for epc in range(self.epoch):
            self.single_epoch(epc+1)
        self.msger.send_msg("FINISH TRAIN")

    # BUGFIX: these stubs were declared with ``abc.abstractclassmethod``,
    # deprecated since Python 3.3 and wrong here anyway -- it wraps the
    # stub as a classmethod while every subclass implements a plain
    # instance method.  ``abstractmethod`` is the correct declaration.
    @abstractmethod
    def prepare(self):
        """Build everything needed before the first training step."""

    @abstractmethod
    def t_loader(self):
        """Return/yield an iterable of (imgs, labels) training batches."""

    @abstractmethod
    def training_process(self, epc, step, imgs, labels):
        """Run one optimization step on a single batch."""

    def single_epoch(self, epc: int):
        """Feed every batch of epoch ``epc`` to ``training_process``.

        Steps are numbered from 1 within the epoch.
        """
        for step, [imgs, labels] in enumerate(self.t_loader(), 1):
            self.training_process(epc, step, imgs, labels)

    def send_msg(self, msg: str):
        """Show ``msg`` as an in-place console line; rate-limited Lark push.

        The message is forwarded to the messenger (split on ', ' into
        separate lines) at most once per ``NOTE_INTERVEL`` seconds.
        """
        print(BLANK_LINE, end='\r')
        print(msg, end='\r')

        curtime = get_timestamp()
        if curtime - self.start_time > self.msger.NOTE_INTERVEL:
            self.msger.send_msg('\n'.join(msg.split(', ')))
            self.start_time = curtime


class ClasificationTrainer(Trainer):
    """Trainer for image-classification tasks.

    The concrete setup code is expected to populate ``self.net``,
    ``self.tr_loader``, ``self.vl_loader``, ``self.cls_dict`` and
    ``self.num_cls`` before ``train()`` runs.  ``self.lossfunc`` is also
    used but never assigned in this file -- TODO confirm where it is set.

    NOTE(review): the class name keeps its original (misspelled) form
    because external callers reference it.
    """

    def __init__(self, task_id: str, conf: dict, title_icon: str) -> None:
        super().__init__(task_id, conf, title_icon)
        self.backbone = conf['backbone']
        # Checkpoints are written under LOG_PATH as "<task>-<backbone>.pth".
        self.model_name = f"{LOG_PATH}/{self.task_id}-{self.backbone}"
        # Filled in later by external setup (see class docstring).
        self.net = None
        self.tr_loader = None
        self.vl_loader = None
        self.cls_dict = None
        self.num_cls = None

    def prepare(self):
        """Set up optimizer, device placement and running metrics.

        Assumes ``self.net``/``self.tr_loader``/``self.vl_loader`` have
        already been assigned (they are ``None`` after ``__init__``).
        """
        self.tr_steps = len(self.tr_loader)
        self.vl_steps = len(self.vl_loader)
        # Steps within an epoch at which validation runs: INTERVEL evenly
        # spaced checkpoints, the last one landing on the final step.
        self.intervel = [round(self.tr_steps*(i+1)/INTERVEL)
                         for i in range(INTERVEL)]

        self.optim = torch.optim.Adam(self.net.parameters(), lr=self.lr)

        self.net.to(self.device)
        self.tr_loss = 0.0
        self.max_acc = [0.0, 0]          # [best accuracy, total step reached]
        self.max_bottom_recall = 0.0     # best worst-class recall so far
        self.max_bottom_precision = 0.0  # best worst-class precision so far
        self.metrics = None

    def train(self):
        """Run the full loop, rename the best checkpoint, return a summary.

        Returns a dict with the best accuracy/step, worst-class metrics,
        class mapping and the renamed model path.
        """
        super().train()
        # Embed the best accuracy (as a percentage) in the final filename.
        res_model_path = f'{self.model_name}-{self.max_acc[0]*100:.0f}.pth'
        os.rename(f'{self.model_name}.pth', res_model_path)
        return {
            "max_acc": self.max_acc[0],
            "best_step": self.max_acc[1],
            "max_bottom_recall": self.max_bottom_recall,
            "max_bottom_precision": self.max_bottom_precision,
            "distr": self.distr,
            "cls_dict": self.cls_dict,
            "model_path": res_model_path,
            "rp_mat": self.metrics['rp_mat'],
        }

    def t_loader(self):
        """Yield (imgs, labels) batches from the training loader."""
        yield from self.tr_loader

    def training_process(self, epc, step, imgs, labels):
        """One optimization step; runs validation at scheduled steps."""
        self.net.train()
        outs = self.net(imgs.to(self.device))
        loss = self.lossfunc(outs, labels.to(self.device))
        self.optim.zero_grad()
        loss.backward()
        self.optim.step()
        self.tr_loss += loss.item()
        self.send_msg(self.get_msg(epc, step))
        if step in self.intervel:
            self.validate(epc, step)

    def validate(self, epc, step):
        """Evaluate on the validation set; log metrics and save the best model."""
        self.net.eval()
        vl_loss = 0.0
        # Confusion matrix: rows indexed by prediction, columns by true label.
        rp_mat = np.zeros([self.num_cls, self.num_cls])
        total_step = step + (epc-1)*self.tr_steps
        tr_loss = self.tr_loss / total_step  # running mean training loss

        with torch.no_grad():
            for vl_step, [vl_imgs, vl_labels] in enumerate(self.vl_loader, 1):
                vl_outs = self.net(vl_imgs.to(self.device))
                vl_loss += self.lossfunc(vl_outs,
                                         vl_labels.to(self.device)).item()
                predict = torch.max(vl_outs, dim=1)[1]
                for i in range(len(predict)):
                    rp_mat[predict[i].item()][vl_labels[i].item()] += 1

                self.send_msg(self.get_msg(epc, step, vl_step))

            vl_loss /= self.vl_steps
            acc = np.trace(rp_mat) / len(self.vl_loader.dataset)
            with np.errstate(divide='ignore', invalid='ignore'):
                # BUGFIX: recall and precision were swapped.  With rows
                # indexed by prediction, recall_i = TP_i / column-sum
                # (true count of class i) and precision_i = TP_i / row-sum
                # (predicted count of class i).
                recall = [rp_mat[i][i]/rp_mat.sum(0)[i]
                          for i in range(self.num_cls)]
                precision = [rp_mat[i][i]/rp_mat.sum(1)[i]
                             for i in range(self.num_cls)]
            # NOTE(review): a class with a zero denominator yields NaN,
            # which makes min()/max() order-dependent -- confirm intended.
            self.max_bottom_recall = max(min(recall), self.max_bottom_recall)
            self.max_bottom_precision = max(
                min(precision), self.max_bottom_precision)
            self.update_metrics(epc, step, tr_loss, vl_loss,
                                acc, recall, precision, rp_mat.tolist())

            # Keep only the best checkpoint (by overall accuracy).
            if acc > self.max_acc[0]:
                self.max_acc = [acc, total_step]
                torch.save(self.net.state_dict(), f'{self.model_name}.pth')

    def update_metrics(self, epc: int, step: int, tr_loss: float,
                       vl_loss: float, acc: float, recall: list[float],
                       precision: list[float], rp_mat: list[list[int]]):
        """Record the latest metrics and forward a flat view to the logger."""
        self.metrics = {
            'tr_loss': tr_loss,
            'vl_loss': vl_loss,
            'acc': acc,
            'recall': recall,
            'precision': precision,
            'rp_mat': rp_mat,
            'mbr': self.max_bottom_recall,
            'mbp': self.max_bottom_precision,
        }
        for i in range(self.num_cls):
            self.metrics[f'recall_{i}'] = recall[i]
        for i in range(self.num_cls):
            self.metrics[f'precision_{i}'] = precision[i]

        # The per-class lists and the raw matrix are too bulky for the log.
        outcomes = {k: v for k, v in self.metrics.items() if k not in [
            'recall', 'precision', 'rp_mat']}
        outcomes['epc'] = epc
        outcomes['step'] = step
        outcomes['total_step'] = step + (epc-1)*self.tr_steps
        self.logger.log_outcome(outcomes)

    def get_msg(self, epc: int, step: int, vl_step: "int | None" = None):
        """Build the zero-padded progress line shown in the console.

        Includes the latest scalar metrics once available, and the
        validation progress when ``vl_step`` is given.
        """
        epoch_len = len(str(self.epoch))
        tstep_len = len(str(self.tr_steps))
        vstep_len = len(str(self.vl_steps))
        msg_list = [f"TRN:[{epc:0>{epoch_len}}/{self.epoch}]"
                    f"[{step:0>{tstep_len}}/{self.tr_steps}]"]

        if self.metrics is not None:
            msg_list.append(', '.join(f'{k}={self.metrics[k]:.4f}' for k in [
                'tr_loss', 'vl_loss', 'acc', 'mbr', 'mbp']))

        if vl_step is not None:
            msg_list.append(f'VAL:[{vl_step:0>{vstep_len}}/{self.vl_steps}]')

        return ', '.join(msg_list)
