from fastai.vision.all import Callback, accuracy, Recorder, CancelValidException
import torch
import torch.nn.functional as F
import os
import json
import os.path as osp
from fastprogress import progress_bar as pb
import numpy as np


# Directory where checkpoints and per-step CSV logs are written.
# NOTE(review): os.environ[...] raises KeyError at import time if MODEL_ROOT is unset.
MODEL_ROOT = os.environ['MODEL_ROOT']


class StepCallback(Callback):
    """Run validation *within* an epoch, not only at epoch end.

    fastai validates once per epoch; this callback runs its own evaluation
    loop every ``step_[epoch]`` training iterations (plus at epoch start
    and on the last batch), restricted to the validation rows whose
    ``from`` column equals 0.  It appends metrics to a CSV file and
    checkpoints the best-accuracy and best-valid-loss weights under
    ``MODEL_ROOT``.

    Evaluation only happens once the optimizer is fully unfrozen
    (``opt.frozen_idx == 0``) and training has passed 50% of the epochs
    (0% in debug mode).
    """

    def __init__(self, model_id, f_idx, valid_df, debug=False):
        # model_id / f_idx identify the model and fold in output file names.
        self.model_id = model_id
        self.f_idx = f_idx
        self.best_acc = float('-inf')
        self.first_log_step = False
        self.best_vl = float('inf')
        self._debug = debug
        # valid_df must align row-for-row with dls.valid_ds; its 'from'
        # column selects the evaluation subset (group 0).
        self.valid_df = valid_df
        self.step_ = None  # eval schedule, built per fit in before_fit
        self.csv_file = osp.join(MODEL_ROOT, f'model_{self.model_id}_{self.f_idx}_step.csv')
        with open(self.csv_file, 'w') as f:
            f.write('epoch,step,valid_loss,valid_accuracy\n')

    def _build_step_schedule(self):
        """Map epoch -> eval interval: every 1/3 of an epoch for the first
        80% of training, every 1/10 afterwards.

        ``max(1, ...)`` guards against a ZeroDivisionError in
        ``after_batch``'s modulo when the train dataloader has fewer than
        3 (or 10) batches.
        """
        n_train = len(self.learn.dls.train)
        cutoff = int(self.learn.n_epoch * 0.8)
        step_ = {}
        for e in range(0, cutoff):
            step_[e] = max(1, n_train // 3)
        for e in range(cutoff, self.learn.n_epoch + 1):
            step_[e] = max(1, n_train // 10)
        return step_

    def before_fit(self):
        # Build the schedule once per fit instead of on every batch (the
        # previous version rebuilt the dict in before_batch each call).
        # It must be rebuilt for each fit: n_epoch differs between the
        # frozen and unfrozen stages of fine-tuning.
        self.step_ = self._build_step_schedule()
        if not self.first_log_step:
            self.learn._logger.info(str(self.step_))
            self.first_log_step = True

    def do_eval(self):
        """Run a manual validation pass; append metrics to the CSV and
        checkpoint the model when accuracy or valid loss improves."""
        self.learn.model.eval()
        # Rows of valid_df that belong to source group 0.
        group_idx = np.argwhere(self.valid_df['from'].values == 0)[:, 0]
        # Pre-allocated logits buffer; 5 = number of classes (assumed from
        # the model head -- TODO confirm).
        preds = torch.zeros((len(self.learn.dls.valid_ds), 5))
        bs = self.learn.dls.valid.bs
        gt = []
        with torch.no_grad():
            for b_idx, batch in pb(enumerate(self.learn.dls.valid), total=len(self.learn.dls.valid), leave=False):
                x, y = batch
                pred = self.learn.model(x)
                if isinstance(pred, tuple):
                    # Some models return (logits, aux); keep logits only.
                    pred = pred[0]
                preds[b_idx * bs:(b_idx + 1) * bs] = pred.cpu()
                gt += y.cpu().numpy().tolist()
        gt = torch.tensor(gt)
        acc = accuracy(preds[group_idx], gt[group_idx])
        vl = F.cross_entropy(preds[group_idx], gt[group_idx])
        with open(self.csv_file, 'a') as f:
            f.write(f'{self.learn.epoch},{self.learn.iter},{vl},{acc}\n')
        if acc > self.best_acc:
            self.best_acc = acc
            self.learn._best_acc = acc
            path = osp.join(MODEL_ROOT, f'model_{self.model_id}_{self.f_idx}_ac.pth')
            torch.save(self.learn.model.state_dict(), path)
            self.learn._logger.info(
                f'Epoch: {self.learn.epoch} Step: {self.learn.iter}: Found best acc {acc} model and save to {path}')
        if vl < self.best_vl:
            self.best_vl = vl
            self.learn._best_vl = vl
            path = osp.join(MODEL_ROOT, f'model_{self.model_id}_{self.f_idx}_vl.pth')
            torch.save(self.learn.model.state_dict(), path)
            self.learn._logger.info(
                f'Epoch: {self.learn.epoch} Step: {self.learn.iter}: Found best vl {vl} model and save to {path}')
        self.learn.model.train()

    def before_batch(self):
        # Evaluate at the start of each epoch once past the warm-up fraction.
        start_valid = 0.5 if not self._debug else 0
        if (self.learn.training and self.opt.frozen_idx == 0 and self.learn.iter == 0
                and self.learn.epoch >= self.learn.n_epoch * start_valid):
            # have to do validation by myself...sad!
            self.do_eval()

    def after_batch(self):
        # Evaluate every step_[epoch] iterations and on the last batch.
        start_valid = 0.5 if not self._debug else 0
        if self.learn.training and self.opt.frozen_idx == 0 and (
                (self.learn.iter != 0 and self.learn.iter % self.step_[self.learn.epoch] == 0) or self.learn.iter == len(
                self.dls.train) - 1) and self.learn.epoch >= self.learn.n_epoch * start_valid:
            # have to do validation by myself...sad!
            self.do_eval()


class ModelInfoCallback(Callback):
    """Log model size and optimizer configuration once, before training starts."""

    def before_fit(self):
        params = list(self.learn.model.parameters())
        trainable = sum(p.numel() for p in params if p.requires_grad)
        total = sum(p.numel() for p in params)
        self.learn._logger.info(
            f'Trainable Parameters: {trainable} Total Parameters: {total}')
        self.learn._logger.info(f'Hyper Opt: {self.learn.opt.hypers}')
        self.learn._logger.info('Opt Param Group:')
        # One line per parameter group: how many tensors it holds and its LR.
        for group in self.learn.opt.param_groups:
            self.learn._logger.info(f"Parameter Count: {len(group['params'])} LR: {group['lr']}")


class LogMetricsCallback(Callback):
    """After each epoch, emit the Recorder's latest metrics as one JSON line."""
    run_after = Recorder  # ensure Recorder has populated values first

    def after_epoch(self):
        # Recorder.metric_names starts with 'epoch' and ends with 'time';
        # the middle names pair up with the latest row of recorder.values.
        try:
            metric = dict(zip(self.recorder.metric_names[1:-1], self.recorder.values[-1]))
            metric['epoch'] = self.learn.epoch
            self.learn._logger.info(json.dumps(metric))
        except IndexError:
            # recorder.values can be empty (e.g. validation was skipped);
            # log and keep training. Logger.warn is a deprecated alias,
            # so use warning().
            self.learn._logger.warning(f'(LogMetricsCallback) Failed to log metrics: {self.recorder.values}')


class EarlyStoppingException(Exception):
    """Raised to abort training when the first fine-tuning stage underperforms."""


class FineTuneEarlyStoppingCallback(Callback):
    """Abort the unfrozen fine-tuning stage when the frozen stage underperformed."""

    def __init__(self, threshold):
        # Minimum 'accuracy' the previous stage must have reached to continue.
        self.threshold = threshold

    def before_fit(self):
        # frozen_idx == 0 means all parameter groups are trainable, i.e. we
        # are entering the second (unfrozen) stage; otherwise do nothing.
        if self.opt.frozen_idx != 0:
            return
        names = self.recorder.metric_names[1:-1]
        metric = dict(zip(names, self.recorder.values[-1]))
        print('=================', metric, '=====================')
        if metric['accuracy'] < self.threshold:
            raise EarlyStoppingException('Fine-tuning first stage early stop')


class GroupMetricsCallback(Callback):
    """After validation, log accuracy and loss separately for each source group.

    ``valid_df`` must align row-for-row with the validation dataloader and
    provide a ``from`` column (group id, 0-2) and a ``label`` column.
    """

    def __init__(self, valid_df):
        self.valid_df = valid_df
        self.prediction = []  # per-batch logits collected during validation

    def before_validate(self):
        # Reset the buffer at the start of every validation pass.
        self.prediction = []

    def after_pred(self):
        # after_validate replaces the list with a tensor, so this isinstance
        # guard (idiomatic replacement for `type(...) == list`) stops any
        # later hook invocations from appending to a consumed buffer.
        if isinstance(self.prediction, list):
            self.prediction.append(self.pred.detach().cpu())

    def after_validate(self):
        self.prediction = torch.cat(self.prediction).float()
        # Sanity check: predictions must line up with valid_df rows.
        assert self.prediction.shape[0] == self.valid_df.shape[0]
        for i in range(3):
            group_idx = np.argwhere(self.valid_df['from'].values == i)[:, 0]
            label = torch.tensor(self.valid_df.iloc[group_idx]['label'].values)
            group_acc = accuracy(self.prediction[group_idx], label)
            group_loss = F.cross_entropy(self.prediction[group_idx], label)
            self.learn._logger.info(f'Group {i}: samples: {len(group_idx)}, accuracy = {group_acc}, loss = {group_loss}')
        
