
import os
import copy
import math
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Module
from tqdm import tqdm
from utils import *
from logger import Logger
import time
import numpy as np
import warnings
import pdb
import higher

class CamModule(Module):
    """Adapter that chains a feature extractor and a classifier into one
    module returning raw logits, as required by Grad-CAM visualization."""

    def __init__(self, feat_model, classifier):
        super(CamModule, self).__init__()
        self.feat_model = feat_model
        self.classifier = classifier

    def forward(self, inputs):
        # The feature model returns (features, feature_maps); only the
        # features feed the classifier, whose first output is the logits.
        extracted, _ = self.feat_model(inputs)
        outputs = self.classifier(extracted)
        return outputs[0]

class model ():
    """Driver object that wires together networks, criterions, optimizers,
    schedulers and data loaders for long-tailed classification training
    and evaluation."""

    def __init__(self, config, data, test=False, meta_sample=False, learner=None, add_bt=0, eval_batch=0, debug=0):
        """Set up training or evaluation state.

        Args:
            config: parsed experiment configuration dict (expects keys
                'training_opt', 'memory', 'networks', 'criterions', ...).
            data: dict of data loaders; expects 'train' (and 'meta' when
                meta_sample is enabled, 'train_plain' when centroids are used).
            test: when True, skip training-only initialization.
            meta_sample: enable the meta-sampling inner loop (requires learner).
            learner: meta learner module, mandatory when meta_sample is True.
            add_bt: when non-zero, enables the batch-transformer encoder branch.
            eval_batch: when non-zero, averages transformed/original logits at eval.
            debug: debug-mode selector; 1/2/4 collect statistics, 3 enables
                Grad-CAM and is then mapped back to 0.
        """

        self.meta_sample = meta_sample
        self.add_bt = add_bt
        self.eval_batch = eval_batch
        self.debug = debug
        self.gradcam = False
        if self.debug == 3:
            # Debug mode 3 only toggles Grad-CAM; normal (non-debug) path otherwise.
            self.gradcam = True
            self.debug = 0
        # init meta learner and meta set
        if self.meta_sample:
            assert learner is not None
            self.learner = learner
            self.meta_data = iter(data['meta'])
        
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.config = config
        self.training_opt = self.config['training_opt']
        self.memory = self.config['memory']
        self.data = data
        self.test_mode = test
        self.num_gpus = torch.cuda.device_count()
        self.do_shuffle = config['shuffle'] if 'shuffle' in config else False

        # Compute epochs from iterations
        # NOTE(review): math is not imported explicitly at the top of this file;
        # presumably it is re-exported by 'from utils import *' — confirm.
        if self.training_opt.get('num_iterations', False):
            self.training_opt['num_epochs'] = math.ceil(self.training_opt['num_iterations'] / len(self.data['train']))
        if self.config.get('warmup_iterations', False):
            self.config['warmup_epochs'] = math.ceil(self.config['warmup_iterations'] / len(self.data['train']))

        # Setup logger
        self.logger = Logger(self.training_opt['log_dir'])
        
        # Initialize model
        self.init_models()

        # Load pre-trained model parameters
        if 'model_dir' in self.config and self.config['model_dir'] is not None:
            self.load_model(self.config['model_dir'])

        # Under training mode, initialize training steps, optimizers, schedulers, criterions, and centroids
        if not self.test_mode:

            # If using steps for training, we need to calculate training steps 
            # for each epoch based on actual number of training data instead of 
            # oversampled data number 
            print('Using steps for training.')
            self.training_data_num = len(self.data['train'].dataset)
            self.epoch_steps = int(self.training_data_num  \
                                   / self.training_opt['batch_size'])

            # Initialize model optimizer and scheduler
            print('Initializing model optimizer.')
            self.scheduler_params = self.training_opt['scheduler_params']
            self.model_optimizer, \
            self.model_optimizer_scheduler = self.init_optimizers(self.model_optim_params_list)
            self.init_criterions()
            if self.memory['init_centroids']:
                # Warm-start the feature-loss centroids from the plain training set.
                self.criterions['FeatureLoss'].centroids.data = \
                    self.centroids_cal(self.data['train_plain'])
            
            # Set up log file (previous run's log is discarded)
            self.log_file = os.path.join(self.training_opt['log_dir'], 'log.txt')
            if os.path.isfile(self.log_file):
                os.remove(self.log_file)
            self.logger.log_cfg(self.config)
        else:
            # Test mode still builds an optimizer/criterions (needed by eval paths).
            self.scheduler_params = self.training_opt['scheduler_params']
            self.model_optimizer, \
            self.model_optimizer_scheduler = self.init_optimizers(self.model_optim_params_list)
            self.init_criterions()
            if 'KNNClassifier' in self.config['networks']['classifier']['def_file']:
                # KNN classifier needs class-centroid features; compute and cache them.
                self.load_model()
                if not self.networks['classifier'].initialized:
                    cfeats = self.get_knncentroids()
                    print('===> Saving features to %s' % 
                          os.path.join(self.training_opt['log_dir'], 'cfeats.pkl'))
                    with open(os.path.join(self.training_opt['log_dir'], 'cfeats.pkl'), 'wb') as f:
                        pickle.dump(cfeats, f)
                    self.networks['classifier'].update(cfeats)
            self.log_file = None
        # Accumulators for the debug statistics collected in batch_forward /
        # batch_backward (per-class gradient magnitudes and attention counts).
        self.stat_grad = {}
        self.stat_grad_mat = np.zeros([self.training_opt['num_classes'], self.training_opt['num_classes']])
        self.stat_grad_count_mat = np.zeros([self.training_opt['num_classes'], self.training_opt['num_classes']])
        self.stat_attens = np.zeros([self.training_opt['num_classes'], self.training_opt['num_classes']])
        self.stat_counts = np.zeros([self.training_opt['num_classes'], self.training_opt['num_classes']])
        self.stat_grad_count = {}
        for i in range(self.training_opt['num_classes']):
            self.stat_grad[i] = 0
            self.stat_grad_count[i] = 0
        
    def init_models(self, optimizer=True):
        """Build every network listed in config['networks'] and collect their
        optimizer parameter groups.

        Args:
            optimizer: unused; kept for backward compatibility with callers.

        Side effects:
            Populates self.networks and self.model_optim_params_list; when
            meta_sample is enabled, creates self.optimizer_meta; when add_bt
            is set, builds self.encoder_layers and self.many_shot_arr.
        """
        networks_defs = self.config['networks']
        self.networks = {}
        self.model_optim_params_list = []

        if self.meta_sample:
            # The meta learner gets its own Adam optimizer, separate from the
            # main SGD optimizer built from model_optim_params_list.
            self.optimizer_meta = torch.optim.Adam(self.learner.parameters(),
                                                   lr=self.training_opt['sampler'].get('lr', 0.01))

        print("Using", torch.cuda.device_count(), "GPUs.")

        for key, val in networks_defs.items():
            # Instantiate the network from its definition file.
            def_file = val['def_file']
            model_args = val['params']
            model_args.update({'test': self.test_mode})
            print(key, model_args)

            self.networks[key] = source_import(def_file).create_model(**model_args)
            if 'KNNClassifier' in type(self.networks[key]).__name__:
                # Put the KNN classifier on one single GPU
                self.networks[key] = self.networks[key].cuda()
            else:
                self.networks[key] = nn.DataParallel(self.networks[key]).cuda()

            if 'fix' in val and val['fix']:
                print('Freezing feature weights except for self attention weights (if exist).')
                for param_name, param in self.networks[key].named_parameters():
                    # Freeze everything except self-attention and fc parameters.
                    if 'selfatt' not in param_name and 'fc' not in param_name:
                        param.requires_grad = False

            if self.meta_sample and key != 'classifier':
                # In meta-sampling mode only the classifier's parameters enter
                # the main optimizer; other networks are skipped, otherwise an
                # error is raised when computing higher-order gradients.
                continue

            # One SGD parameter group per network.
            optim_params = val['optim_params']
            self.model_optim_params_list.append({'params': self.networks[key].parameters(),
                                                'lr': optim_params['lr'],
                                                'momentum': optim_params['momentum'],
                                                'weight_decay': optim_params['weight_decay']})

        if self.add_bt:
            from encoder_network import obtain_sample_relation_models
            self.encoder_layers = obtain_sample_relation_models(self.add_bt, self.training_opt['feature_dim'], self.debug)
            self.encoder_layers = self.encoder_layers.cuda()
            # NOTE(review): hard-coded, machine-specific .npy paths below —
            # consider moving them into the config.
            if 'DATASET_N' in os.environ and os.environ['DATASET_N'].startswith('ImageNet'):
                print('load imagenet -----')
                cls_num = np.load('/home/zhou9878/Code/RIDE-LongTailRecognition/cls_num.npy')
            else:
                print('load inature -----')
                cls_num = np.load('/home/zhou9878/Code/RIDE-LongTailRecognition/obj_cls_ina.npy')
            # Binary mask of "many-shot" classes (more than 100 training samples).
            self.many_shot_arr = np.asarray(cls_num > 100).astype(np.float32)
            self.many_shot_arr = torch.from_numpy(self.many_shot_arr)
            # NOTE(review): 'optim_params' here is the leftover value from the
            # last loop iteration above (raises NameError if every network was
            # skipped) — consider storing the desired hyper-parameters explicitly.
            self.model_optim_params_list.append({'params': self.encoder_layers.parameters(),
                                                 'lr': optim_params['lr'] * 0.1,
                                                 'momentum': optim_params['momentum'],
                                                 'weight_decay': optim_params['weight_decay']})

    def init_criterions(self):
        """Instantiate every loss in config['criterions'] and, when a loss has
        its own optim_params, build a criterion optimizer + scheduler.

        Side effects:
            Populates self.criterions and self.criterion_weights; sets
            self.criterion_optimizer (and its scheduler) or leaves it None.
        """
        criterion_defs = self.config['criterions']
        self.criterions = {}
        self.criterion_weights = {}

        for key, val in criterion_defs.items():
            def_file = val['def_file']
            loss_args = list(val['loss_params'].values())

            self.criterions[key] = source_import(def_file).create_loss(*loss_args).cuda()
            self.criterion_weights[key] = val['weight']

            if val['optim_params']:
                print('Initializing criterion optimizer.')
                # BUG FIX: the original rebound 'optim_params' from the config
                # dict to the parameter-group *list*, then indexed the list with
                # 'lr' in the add_bt branch (TypeError). Keep the config dict
                # and the group list under separate names.
                base_params = val['optim_params']
                optim_params = [{'params': self.criterions[key].parameters(),
                                'lr': base_params['lr'],
                                'momentum': base_params['momentum'],
                                'weight_decay': base_params['weight_decay']}]
                if self.add_bt:
                    # Train the encoder with a smaller lr and a larger weight decay.
                    optim_params.append(
                        {'params': self.encoder_layers.parameters(),
                         'lr': base_params['lr'] * 0.1,
                         'momentum': base_params['momentum'],
                         'weight_decay': base_params['weight_decay'] * 5,
                         })
                # Initialize criterion optimizer and scheduler
                self.criterion_optimizer, \
                self.criterion_optimizer_scheduler = self.init_optimizers(optim_params)
            else:
                # NOTE(review): when the *last* criterion has no optim_params this
                # clears any optimizer created for an earlier criterion — confirm
                # configs enable optim_params on at most one criterion.
                self.criterion_optimizer = None

    def init_optimizers(self, optim_params):
        print('optimize:', optim_params)
        optimizer = optim.SGD(optim_params)
        if self.config['coslr']:
            print("===> Using coslr eta_min={}".format(self.config['endlr']))
            scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
                optimizer, self.training_opt['num_epochs'], eta_min=self.config['endlr'])
        elif self.config['coslrwarmup']:
            print("===> Using coslrwarmup eta_min={}, warmup_epochs={}".format(
                self.config['endlr'],self.config['warmup_epochs']))
            scheduler = CosineAnnealingLRWarmup(
                optimizer=optimizer,
                T_max=self.training_opt['num_epochs'],
                eta_min=self.config['endlr'],
                warmup_epochs=self.config['warmup_epochs'],
                base_lr=self.config['base_lr'],
                warmup_lr=self.config['warmup_lr']
            )
        else:
            scheduler = optim.lr_scheduler.StepLR(optimizer,
                                                  step_size=self.scheduler_params['step_size'],
                                                  gamma=self.scheduler_params['gamma'])
        return optimizer, scheduler

    def batch_forward (self, inputs, labels=None, centroids=False, feature_ext=False, phase='train', paths=None):
        '''
        General single-batch forward pass.

        Computes self.features / self.feature_maps from the feature extractor
        and, unless feature_ext is True, self.logits and
        self.direct_memory_feature from the classifier. When add_bt is active
        during training (or eval_batch > 0), the features additionally pass
        through the batch-transformer encoder and both the transformed and the
        original features are classified.
        '''

        # Calculate Features
        self.features, self.feature_maps = self.networks['feat_model'](inputs)

        # If not just extracting features, calculate logits
        if not feature_ext:

            # During training, calculate centroids if needed to 
            if phase != 'test':
                if centroids and 'FeatureLoss' in self.criterions.keys():
                    self.centroids = self.criterions['FeatureLoss'].centroids.data
                    # NOTE(review): this torch.cat result is discarded — looks
                    # like dead code (the replication happens again just below).
                    torch.cat([self.centroids] * self.num_gpus)
                else:
                    self.centroids = None

            # NOTE(review): in phase == 'test', self.centroids must already
            # have been set by a previous call — confirm callers guarantee this.
            if self.centroids is not None:
                # Replicate centroids once per GPU for DataParallel scatter.
                centroids_ = torch.cat([self.centroids] * self.num_gpus)
            else:
                centroids_ = self.centroids

            # Keep the raw (pre-encoder) features for gradient statistics.
            self.old_features = self.features
            if (phase == 'train' and self.add_bt) or self.eval_batch > 0:
                old_x = self.features

                # Encoder expects a singleton "sequence" dimension.
                x = self.features.unsqueeze(1)
                if self.debug == 2:
                    # Debug mode 2: accumulate class-to-class attention statistics.
                    x, attens = self.encoder_layers(x)
                    labels_np = labels.detach().cpu().numpy()
                    # One-hot matrix mapping each sample to its class row.
                    matrix = np.eye(self.training_opt['num_classes'])[labels_np]
                    attens = attens.squeeze(0)
                    attens_np = attens.detach().cpu().numpy()


                    # for inverse
                    attens_np = attens_np + attens_np.transpose()

                    self.stat_attens += np.matmul(matrix.transpose(), np.matmul(attens_np, matrix)) # 512 x 512
                    self.stat_counts += np.matmul(matrix.transpose(), np.matmul(np.ones_like(attens_np)*2, matrix))


                else:
                    x = self.encoder_layers(x)
                x = x.squeeze(1)
                self.features = x
                # Also classify the untransformed features.
                logits_old, direct_memory_feature_old = self.networks['classifier'](old_x, centroids_)

            # Calculate logits with classifier
            self.logits, self.direct_memory_feature = self.networks['classifier'](self.features, centroids_)
            if self.add_bt and phase == 'train':
                # Training with add_bt: stack transformed then original halves.
                self.logits = torch.cat([self.logits, logits_old], dim=0)
                self.features = torch.cat([self.features, old_x], dim=0)
            if self.eval_batch:
                # Evaluation: average transformed and original logits.
                self.logits = (self.logits + logits_old) / 2.

    def batch_backward(self, labels=None):
        """Backpropagate self.loss and step the optimizers.

        Debug modes 1 and 4 additionally backpropagate each per-sample loss
        from self.loss_all one at a time to collect feature-gradient
        statistics, then redo the normal backward pass.
        """
        # Zero out optimizer gradients
        self.model_optimizer.zero_grad()
        if self.criterion_optimizer:
            self.criterion_optimizer.zero_grad()
        # Back-propagation from loss outputs
        if self.debug == 1:
            # Debug 1: mean gradient magnitude each sample induces on the OTHER
            # samples' features, accumulated per class of the inducing sample.
            if not self.test_mode:
                for model in self.networks.values():
                    model.eval()
            self.old_features.retain_grad()
            for i in range(len(self.loss_all[:len(self.old_features)])):
                self.loss_all[i].backward(retain_graph=True)
                # Mean |grad| over all feature rows except sample i's own.
                grads = torch.abs(torch.cat([self.old_features.grad[:i], self.old_features.grad[i+1:]])).mean()
                self.stat_grad[int(labels[i])] += grads.item()
                self.stat_grad_count[int(labels[i])] += 1
                self.old_features.grad.zero_()
            if self.test_mode:
                # Statistics-only pass: no parameter update in test mode.
                return
            else:
                for model in self.networks.values():
                    model.train()
            # Redo the clean backward pass for the actual update.
            self.model_optimizer.zero_grad()
            if self.criterion_optimizer:
                self.criterion_optimizer.zero_grad()
            self.loss.backward()
        elif self.debug == 4:
            # Gradients for other images
            # Debug 4: same as debug 1 but tracked as a class-by-class matrix.
            if not self.test_mode:
                for model in self.networks.values():
                    model.eval()
            self.old_features.retain_grad()
            for i in range(len(self.loss_all[:len(self.old_features)])):
                self.loss_all[i].backward(retain_graph=True)
                for j in range(len(self.loss_all[:len(self.old_features)])):
                    if i != j:
                        grads = torch.abs(self.old_features.grad[j]).mean()
                        self.stat_grad_mat[int(labels[i])][int(labels[j])] += grads.item()
                        self.stat_grad_count_mat[int(labels[i])][int(labels[j])] += 1
                self.old_features.grad.zero_()
            if self.test_mode:
                return
            else:
                for model in self.networks.values():
                    model.train()
            self.model_optimizer.zero_grad()
            if self.criterion_optimizer:
                self.criterion_optimizer.zero_grad()
            self.loss.backward()
        else:
            self.loss.backward()
        # Step optimizers
        self.model_optimizer.step()
        if self.criterion_optimizer:
            self.criterion_optimizer.step()

    def batch_loss(self, labels):
        self.loss = 0
        if self.add_bt:
            self.loss_perf = self.criterions['PerformanceLoss'](self.logits[:self.logits.shape[0]//2], labels[:self.logits.shape[0]//2])
            self.loss_perf *=  self.criterion_weights['PerformanceLoss']
            self.loss += self.loss_perf

            self.loss_perf = self.criterions['PerformanceLoss'](self.logits[self.logits.shape[0]//2:], labels[self.logits.shape[0]//2:])
            self.loss_perf *=  self.criterion_weights['PerformanceLoss']
            self.loss += self.loss_perf


        # First, apply performance loss
        if 'PerformanceLoss' in self.criterions.keys():
            if self.debug in [1, 4]:
                self.loss_all = self.criterions['PerformanceLoss'](self.logits, labels, 'none')
                self.loss_all *=  self.criterion_weights['PerformanceLoss']
                self.loss_perf = torch.mean(self.loss_all)
            else:
                self.loss_perf = self.criterions['PerformanceLoss'](self.logits, labels)
            self.loss_perf *=  self.criterion_weights['PerformanceLoss']
            self.loss += self.loss_perf

        # Apply loss on features if set up
        if 'FeatureLoss' in self.criterions.keys():
            self.loss_feat = self.criterions['FeatureLoss'](self.features, labels)
            self.loss_feat = self.loss_feat * self.criterion_weights['FeatureLoss']
            # Add feature loss to total loss
            self.loss += self.loss_feat
    
    def shuffle_batch(self, x, y):
        index = torch.randperm(x.size(0))
        x = x[index]
        y = y[index]
        return x, y

    def meta_forward(self, inputs, labels, verbose=False):
        """One meta-sampling inner-loop step.

        Builds a differentiable copy of the classifier with `higher`, takes a
        virtual training step with per-sample losses weighted by the learner,
        then updates the learner's sampling weights from the cross-entropy on
        a meta-validation batch.
        """
        # take a meta step in the inner loop
        self.learner.train()
        self.model_optimizer.zero_grad()
        self.optimizer_meta.zero_grad()
        if self.add_bt:
            # Prepend the encoder (Unflatten/Flatten handle the singleton
            # sequence dim) so the surrogate matches batch_forward's pipeline.
            network = torch.nn.Sequential(torch.nn.Unflatten(1, [1, -1]), self.encoder_layers, torch.nn.Flatten(1, -1), self.networks['classifier'])
        else:
            network = self.networks['classifier']
        with higher.innerloop_ctx(network, self.model_optimizer) as (fmodel, diffopt):
            # obtain the surrogate model
            features, _ = self.networks['feat_model'](inputs)
            if self.add_bt:
                # NOTE(review): fmodel[-1] applies only the classifier here,
                # skipping the encoder layers — confirm this is intended.
                train_outputs, _ = fmodel[-1](features.detach())
            else:
                train_outputs, _ = fmodel(features.detach())
            loss = self.criterions['PerformanceLoss'](train_outputs, labels, reduction='none')
            loss = self.learner.forward_loss(loss)
            diffopt.step(loss)

            # use the surrogate model to update sample rate
            # NOTE(review): self.meta_data is a plain iterator; confirm it
            # cannot be exhausted mid-training (StopIteration).
            val_inputs, val_targets, _ = next(self.meta_data)
            val_inputs = val_inputs.cuda()
            val_targets = val_targets.cuda()
            features, _ = self.networks['feat_model'](val_inputs)
            val_outputs, _ = fmodel(features.detach())
            val_loss = F.cross_entropy(val_outputs, val_targets, reduction='mean')
            val_loss.backward()
            self.optimizer_meta.step()

        self.learner.eval()

        if verbose:
            # log the sample rates
            num_classes = self.learner.num_classes
            prob = self.learner.fc[0].weight.sigmoid().squeeze(0)
            print_str = ['Unnormalized Sample Prob:']
            # Print at most ~10 evenly spaced class probabilities.
            interval = 1 if num_classes < 10 else num_classes // 10
            for i in range(0, num_classes, interval):
                print_str.append('class{}={:.3f},'.format(i, prob[i].item()))
            max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0
            print_str.append('\nMax Mem: {:.0f}M'.format(max_mem_mb))
            print_write(print_str, self.log_file)

    def train(self):
        """Run the full training loop: per-epoch training, validation,
        best-checkpoint tracking, debug-statistic dumps, and a final
        evaluation on the test (or validation) split."""
        # When training the network
        print_str = ['Phase: train']
        print_write(print_str, self.log_file)
        time.sleep(0.25)

        print_write(['Do shuffle??? --- ', self.do_shuffle], self.log_file)

        # Initialize best model
        best_model_weights = {}
        best_model_weights['feat_model'] = copy.deepcopy(self.networks['feat_model'].state_dict())
        best_model_weights['classifier'] = copy.deepcopy(self.networks['classifier'].state_dict())
        if self.add_bt:
            best_model_weights['bt'] = copy.deepcopy(self.encoder_layers.state_dict())
        best_acc = 0.0
        best_epoch = 0
        # best_centroids = self.centroids
        # NOTE(review): best_centroids is only assigned when validation accuracy
        # improves; if that never happens, the save_model call at the end raises
        # NameError — confirm, or restore the commented initialization above.

        end_epoch = self.training_opt['num_epochs']

        # Loop over epochs
        for epoch in range(1, end_epoch + 1):
            for model in self.networks.values():
                model.train()

            torch.cuda.empty_cache()
            
            # Set model modes and set scheduler
            # In training, step optimizer scheduler and set model to train() 
            # NOTE(review): scheduler.step() before the epoch's optimizer steps
            # follows the pre-PyTorch-1.1 convention — confirm intended.
            self.model_optimizer_scheduler.step()
            if self.criterion_optimizer:
                self.criterion_optimizer_scheduler.step()

            # Iterate over dataset
            total_preds = []
            total_labels = []

            for step, (inputs, labels, indexes) in enumerate(self.data['train']):
                # Break when step equal to epoch step
                if step == self.epoch_steps:
                    break
                if self.do_shuffle:
                    inputs, labels = self.shuffle_batch(inputs, labels)
                inputs, labels = inputs.cuda(), labels.cuda()

                # If on training phase, enable gradients
                with torch.set_grad_enabled(True):
                    if self.meta_sample:
                        # do inner loop
                        self.meta_forward(inputs, labels, verbose=step % self.training_opt['display_step'] == 0)

                    # If training, forward with loss, and no top 5 accuracy calculation
                    self.batch_forward(inputs, labels,
                                       centroids=self.memory['centroids'],
                                       phase='train')
                    # add_bt doubles the logits (transformed + original halves),
                    # so the labels are duplicated to match.
                    if self.add_bt:
                        labels = torch.cat([labels, labels], dim=0)
                    # NOTE(review): this second duplication can double labels
                    # again when logits still outnumber them — confirm the two
                    # conditions are mutually exclusive in practice.
                    if len(self.logits) > len(labels):
                        labels = torch.cat([labels, labels], dim=0)

                    self.batch_loss(labels)
                    self.batch_backward(labels)

                    # Tracking predictions
                    _, preds = torch.max(self.logits, 1)
                    total_preds.append(torch2numpy(preds))
                    total_labels.append(torch2numpy(labels))

                    # Output minibatch training results
                    if step % self.training_opt['display_step'] == 0:

                        minibatch_loss_feat = self.loss_feat.item() \
                            if 'FeatureLoss' in self.criterions.keys() else None
                        minibatch_loss_perf = self.loss_perf.item() \
                            if 'PerformanceLoss' in self.criterions else None
                        minibatch_loss_total = self.loss.item()
                        minibatch_acc = mic_acc_cal(preds, labels)

                        print_str = ['Epoch: [%d/%d]'
                                     % (epoch, self.training_opt['num_epochs']),
                                     'Step: %5d'
                                     % (step),
                                     'Minibatch_loss_feature: %.3f'
                                     % (minibatch_loss_feat) if minibatch_loss_feat else '',
                                     'Minibatch_loss_performance: %.3f'
                                     % (minibatch_loss_perf) if minibatch_loss_perf else '',
                                     'Minibatch_accuracy_micro: %.3f'
                                      % (minibatch_acc)]
                        print_write(print_str, self.log_file)

                        loss_info = {
                            'Epoch': epoch,
                            'Step': step,
                            'Total': minibatch_loss_total,
                            'CE': minibatch_loss_perf,
                            'feat': minibatch_loss_feat
                        }

                        self.logger.log_loss(loss_info)

                # Update priority weights if using PrioritizedSampler
                if hasattr(self.data['train'].sampler, 'update_weights'):
                    if hasattr(self.data['train'].sampler, 'ptype'):
                        ptype = self.data['train'].sampler.ptype
                    else:
                        ptype = 'score'
                    ws = get_priority(ptype, self.logits.detach(), labels)
                    inlist = [indexes.cpu().numpy(), ws]
                    if self.training_opt['sampler']['type'] == 'ClassPrioritySampler':
                        inlist.append(labels.cpu().numpy())
                    self.data['train'].sampler.update_weights(*inlist)

            if hasattr(self.data['train'].sampler, 'get_weights'):
                self.logger.log_ws(epoch, self.data['train'].sampler.get_weights())
            if hasattr(self.data['train'].sampler, 'reset_weights'):
                self.data['train'].sampler.reset_weights(epoch)

            # After every epoch, validation
            rsls = {'epoch': epoch}
            rsls_train = self.eval_with_preds(total_preds, total_labels)
            rsls_eval = self.eval(phase='val')
            rsls.update(rsls_train)
            rsls.update(rsls_eval)

            # Reset class weights for sampling if pri_mode is valid
            if hasattr(self.data['train'].sampler, 'reset_priority'):
                ws = get_priority(self.data['train'].sampler.ptype,
                                  self.total_logits.detach(),
                                  self.total_labels)
                self.data['train'].sampler.reset_priority(ws, self.total_labels.cpu().numpy())

            # Log results
            self.logger.log_acc(rsls)

            # Under validation, the best model need to be updated
            if self.eval_acc_mic_top1 > best_acc:
                best_epoch = epoch
                best_acc = self.eval_acc_mic_top1
                best_centroids = self.centroids
                best_model_weights['feat_model'] = copy.deepcopy(self.networks['feat_model'].state_dict())
                best_model_weights['classifier'] = copy.deepcopy(self.networks['classifier'].state_dict())
                if self.add_bt > 0 and self.add_bt < 50:
                    best_model_weights['bt'] = copy.deepcopy(self.encoder_layers.state_dict())
            
            print('===> Saving checkpoint')
            self.save_latest(epoch)
            if self.debug == 1:
                print(self.stat_grad)
                print(self.stat_grad_count)
            # Periodically dump the debug statistics (every 10% of training).
            if self.debug == 4 and epoch % (end_epoch // 10) == 0:
                np.save(self.training_opt['log_dir']+'/stat_grad_mat_{}.npy'.format(epoch), self.stat_grad_mat)
                np.save(self.training_opt['log_dir']+'/stat_grad_count_mat_{}.npy'.format(epoch), self.stat_grad_count_mat)
            if self.debug == 2 and epoch % (end_epoch // 10) == 0:
                np.save(self.training_opt['log_dir']+'/stat_attens_{}.npy'.format(epoch), self.stat_attens)
                np.save(self.training_opt['log_dir']+'/stat_counts_{}.npy'.format(epoch), self.stat_counts)

        print()
        print('Training Complete.')

        print_str = ['Best validation accuracy is %.3f at epoch %d' % (best_acc, best_epoch)]
        print_write(print_str, self.log_file)
        # Save the best model and best centroids if calculated
        self.save_model(epoch, best_epoch, best_model_weights, best_acc, centroids=best_centroids)

        # Test on the test set
        self.reset_model(best_model_weights)
        self.eval('test' if 'test' in self.data else 'val')
        print('Done')
    
    def eval_with_preds(self, preds, labels):
        """Compute training accuracies (overall / many / median / low shot)
        from accumulated batch predictions, handling mixup label tuples."""
        # Total sample count across all batches.
        n_total = sum(len(p) for p in preds)

        # Separate plain batches from mixup batches (labels given as tuples).
        plain_p, plain_l = [], []
        mix_p, mix_l1, mix_l2, mix_w = [], [], [], []
        for p, l in zip(preds, labels):
            if isinstance(l, tuple):
                mix_p.append(p)
                mix_l1.append(l[0])
                mix_l2.append(l[1])
                mix_w.append(l[2] * np.ones_like(l[0]))
            else:
                plain_p.append(p)
                plain_l.append(l)

        rsl = {'train_all': 0., 'train_many': 0., 'train_median': 0., 'train_low': 0.}

        # Accuracy over the plain (non-mixup) samples, weighted by their share.
        if plain_p:
            plain_p = np.concatenate(plain_p)
            plain_l = np.concatenate(plain_l)
            top1 = mic_acc_cal(plain_p, plain_l)
            many, median, low = shot_acc(plain_p, plain_l, self.data['train'])
            frac = len(plain_p) / n_total
            rsl['train_all'] += frac * top1
            rsl['train_many'] += frac * many
            rsl['train_median'] += frac * median
            rsl['train_low'] += frac * low

        # Mixup samples: each prediction is scored against both labels,
        # weighted by lambda and (1 - lambda) respectively.
        if mix_p:
            mixed_preds = np.concatenate(mix_p * 2)
            mixed_labels = np.concatenate(mix_l1 + mix_l2)
            ws = np.concatenate(mix_w)
            ws = np.concatenate([ws, 1 - ws])
            top1 = weighted_mic_acc_cal(mixed_preds, mixed_labels, ws)
            many, median, low = weighted_shot_acc(mixed_preds, mixed_labels, ws, self.data['train'])
            frac = len(mixed_preds) / 2 / n_total
            rsl['train_all'] += frac * top1
            rsl['train_many'] += frac * many
            rsl['train_median'] += frac * median
            rsl['train_low'] += frac * low

        # Top-1 accuracy and additional string
        print_str = ['\n Training acc Top1: %.3f \n' % (rsl['train_all']),
                     'Many_top1: %.3f' % (rsl['train_many']),
                     'Median_top1: %.3f' % (rsl['train_median']),
                     'Low_top1: %.3f' % (rsl['train_low']),
                     '\n']
        print_write(print_str, self.log_file)

        return rsl

    def eval(self, phase: str = 'val', openset: bool = False, save_feat: bool = False):
        """Run inference over ``self.data[phase]`` and report shot-wise accuracy.

        Args:
            phase: dataloader key to evaluate (e.g. 'val', 'test', 'train_plain').
            openset: if True, predictions whose max softmax probability falls
                below ``training_opt['open_threshold']`` are relabeled -1 and an
                open-set accuracy is also printed.
            save_feat: if True, skip metric computation; instead dump logits,
                features, labels and ids to a pickle under the log dir and
                return None (early return).

        Returns:
            dict with keys '<phase>_all', '<phase>_many', '<phase>_median',
            '<phase>_low', '<phase>_fscore' mapping to floats, or None when
            ``save_feat`` is set.

        Side effects: fills ``self.total_logits/total_labels/total_paths``,
        sets the ``self.eval_*`` metric attributes, writes to the log file,
        and in debug / grad-cam modes saves diagnostic arrays or CAM figures.
        """

        print_str = ['Phase: %s' % (phase)]
        print_write(print_str, self.log_file)
        time.sleep(0.25)  # let stdout flush before tqdm starts its progress bar

        if openset:
            print('Under openset test mode. Open threshold is %.1f' 
                  % self.training_opt['open_threshold'])
 
        torch.cuda.empty_cache()

        # Switch every sub-network to eval mode (freezes BN stats / dropout).
        for model in self.networks.values():
            model.eval()

        # Running accumulators over the whole dataloader.
        self.total_logits = torch.empty((0, self.training_opt['num_classes'])).cuda()
        self.total_labels = torch.empty(0, dtype=torch.long).cuda()
        self.total_paths = np.empty(0)

        get_feat_only = save_feat
        feats_all, labels_all, idxs_all, logits_all = [], [], [], []
        feats_pre_all = []
        featmaps_all = []
        gradcam_vis = self.gradcam
        if gradcam_vis:
            # NOTE(review): assumes feat_model is DataParallel-wrapped and exposes
            # a `layer3` block (ResNet-style backbone) — confirm for other nets.
            target_layer = self.networks['feat_model'].module.layer3
            from pytorch_grad_cam import GradCAM
            from pytorch_grad_cam.utils.image import show_cam_on_image

            # CamModule fuses feature extractor + classifier into one forward
            # pass so GradCAM can hook the target layer.
            gradcam = GradCAM(CamModule(self.networks['feat_model'].module,
                                                  self.networks['classifier'].module), target_layer)
            import json
            # Per-class sample frequencies; used below to choose which classes
            # get CAM visualizations.
            freq_dict = json.load(open("./cls_freq/ImageNet_LT.json"))

        # Caps the number of CAM images saved per class; sized for 1000 classes
        # (ImageNet) — TODO confirm when evaluating other datasets.
        saved_counts = np.zeros([1000])
        # Iterate over dataset
        for inputs, labels, paths in tqdm(self.data[phase]):
            inputs, labels = inputs.cuda(), labels.cuda()

            # Plain inference pass: gradients disabled.
            with torch.set_grad_enabled(False):

                # In validation or testing
                self.batch_forward(inputs, labels, 
                                   centroids=self.memory['centroids'],
                                   phase=phase, paths=paths)
                if not get_feat_only:
                    self.total_logits = torch.cat((self.total_logits, self.logits))
                    self.total_labels = torch.cat((self.total_labels, labels))
                    self.total_paths = np.concatenate((self.total_paths, paths))

                if get_feat_only:
                    logits_all.append(self.logits.cpu().numpy())
                    feats_all.append(self.features.cpu().numpy())
                    feats_pre_all.append(self.old_features.cpu().numpy())
                    labels_all.append(labels.cpu().numpy())
                    idxs_all.append(paths.numpy())
            # Debug modes 1/4/5 need gradients: rerun forward + backward so that
            # batch_backward can accumulate gradient statistics.
            with torch.set_grad_enabled(True):
                if self.debug in [1, 4, 5]:
                    self.batch_forward(inputs, labels,
                                       centroids=self.memory['centroids'],
                                       phase=phase)
                    self.batch_loss(labels)
                    self.batch_backward(labels)
            if gradcam_vis:
                grayscale_cam = gradcam(input_tensor=inputs, target_category=labels)

                labels_np = labels.detach().cpu().numpy()
                # NOTE(review): assumes `paths` here is a tensor of sample ids,
                # not string paths — verify against the dataloader.
                paths_np = paths.detach().cpu().numpy()
                for bj, item in enumerate(inputs):
                    # Skip very rare classes (freq < 10) and classes that already
                    # have enough saved visualizations.
                    # if freq_dict[labels_np[bj]] > 20:
                    #     continue
                    if freq_dict[labels_np[bj]] < 10 or saved_counts[labels_np[bj]] > 10:
                        continue
                    saved_counts[labels_np[bj]] += 1

                    ditem = item.detach().cpu().numpy()
                    # Default de-normalization stats (CIFAR-style), overridden
                    # below for ImageNet_LT.
                    mean = np.asarray([0.4914, 0.4822, 0.4465])
                    std = np.asarray([0.2023, 0.1994, 0.2010])

                    if self.training_opt['dataset'] == 'ImageNet_LT':
                        mean = np.asarray([0.485, 0.456, 0.406])
                        std = np.asarray([0.229, 0.224, 0.225])
                    # CHW -> HWC, undo normalization, rescale to [0, 1] for display.
                    ditem = np.transpose(ditem, (1,2,0))
                    ditem = ditem * std + mean

                    ditem = ditem / ditem.max()
                    visualization = show_cam_on_image(ditem, grayscale_cam[bj], use_rgb=True)
                    import matplotlib.pyplot as plt
                    plt.figure(figsize=(30, 8))
                    plt.clf()
                    # Panel 1: input image; panel 2: normalized CAM; panel 3: overlay.
                    plt.subplot(1, 3, 1)
                    plt.imshow(ditem)
                    plt.subplot(1, 3, 2)
                    plt.imshow(grayscale_cam[bj]/grayscale_cam[bj].max())
                    plt.subplot(1, 3, 3)
                    plt.imshow(visualization)

                    # if not os.path.exists('/scratch//ZHIHOUDATA/jpgs_cam_bt_{}'.format(self.add_bt)):
                    #     os.makedirs('/scratch/ZHIHOUDATA/jpgs_cam_bt_{}'.format(self.add_bt))
                    # NOTE(review): hard-coded output dir must already exist.
                    plt.savefig('/scratch//ZHIHOUDATA/jpgs_cam_bt/{}_{}_{}_{}_{}.jpg'.format(paths_np[bj], labels_np[bj],
                                                                                             self.add_bt, self.eval_batch, int(phase !='train_plain')))
                    plt.clf()
                    plt.close('all')
        # Flush debug statistics collected during the pass.
        if self.debug == 1:
            print(self.stat_grad, self.stat_grad_count)
        if self.debug == 4:
            np.save(self.training_opt['log_dir']+'/stat_grad_mat_{}.npy'.format('test'), self.stat_grad_mat)
            np.save(self.training_opt['log_dir']+'/stat_grad_count_mat_{}.npy'.format('test'), self.stat_grad_count_mat)
        if self.debug == 2:
            np.save(self.training_opt['log_dir']+'/stat_attens_{}.npy'.format('test'), self.stat_attens)
            np.save(self.training_opt['log_dir']+'/stat_counts_{}.npy'.format('test'), self.stat_counts)
        if get_feat_only:
            # Feature-dump path: pickle everything and return early (no metrics).
            typ = 'feat'
            if phase == 'train_plain':
                name = 'train{}_{}_all.pkl'.format(typ, self.add_bt)
            elif phase == 'test':
                name = 'test{}_{}_all.pkl'.format(typ, self.add_bt)
            elif phase == 'val':
                name = 'val{}_{}_all.pkl'.format(typ, self.add_bt)

            fname = os.path.join(self.training_opt['log_dir'], name)
            print('===> Saving feats to ' + fname)
            with open(fname, 'wb') as f:
                pickle.dump({
                             'feats_pre': np.concatenate(feats_pre_all),
                             'feats': np.concatenate(feats_all),
                             'labels': np.concatenate(labels_all),
                             'idxs': np.concatenate(idxs_all),
                            },
                            f, protocol=4) 
            return 
        # Top-1 prediction and its softmax confidence for every sample.
        probs, preds = F.softmax(self.total_logits.detach(), dim=1).max(dim=1)

        if openset:
            # Low-confidence predictions become the open-set class (-1).
            preds[probs < self.training_opt['open_threshold']] = -1
            self.openset_acc = mic_acc_cal(preds[self.total_labels == -1],
                                            self.total_labels[self.total_labels == -1])
            print('\n\nOpenset Accuracy: %.3f' % self.openset_acc)

        # Calculate the overall accuracy and F measurement (closed-set samples only)
        self.eval_acc_mic_top1= mic_acc_cal(preds[self.total_labels != -1],
                                            self.total_labels[self.total_labels != -1])
        self.eval_f_measure = F_measure(preds, self.total_labels, openset=openset,
                                        theta=self.training_opt['open_threshold'])
        # Accuracy broken down by training-set shot count (many/median/low).
        self.many_acc_top1, \
        self.median_acc_top1, \
        self.low_acc_top1, \
        self.cls_accs = shot_acc(preds[self.total_labels != -1],
                                 self.total_labels[self.total_labels != -1], 
                                 self.data['train'],
                                 acc_per_cls=True)
        # Top-1 accuracy and additional string
        print_str = ['\n\n',
                     'Phase: %s' 
                     % (phase),
                     '\n\n',
                     'Evaluation_accuracy_micro_top1: %.3f' 
                     % (self.eval_acc_mic_top1),
                     '\n',
                     'Averaged F-measure: %.3f' 
                     % (self.eval_f_measure),
                     '\n',
                     'Many_shot_accuracy_top1: %.3f' 
                     % (self.many_acc_top1),
                     'Median_shot_accuracy_top1: %.3f' 
                     % (self.median_acc_top1),
                     'Low_shot_accuracy_top1: %.3f' 
                     % (self.low_acc_top1),
                     '\n']
        
        rsl = {phase + '_all': self.eval_acc_mic_top1,
               phase + '_many': self.many_acc_top1,
               phase + '_median': self.median_acc_top1,
               phase + '_low': self.low_acc_top1,
               phase + '_fscore': self.eval_f_measure}

        if phase == 'val':
            print_write(print_str, self.log_file)
        else:
            # Compact tab-separated summary (many/median/low/all) for test logs.
            acc_str = ["{:.1f} \t {:.1f} \t {:.1f} \t {:.1f}".format(
                self.many_acc_top1 * 100,
                self.median_acc_top1 * 100,
                self.low_acc_top1 * 100,
                self.eval_acc_mic_top1 * 100)]
            if self.log_file is not None and os.path.exists(self.log_file):
                print_write(print_str, self.log_file)
                print_write(acc_str, self.log_file)
            else:
                print(*print_str)
                print(*acc_str)
        
        if phase == 'test':
            # Persist per-class accuracies for offline analysis.
            with open(os.path.join(self.training_opt['log_dir'], 'cls_accs.pkl'), 'wb') as f:
                pickle.dump(self.cls_accs, f)
        return rsl
            
    def centroids_cal(self, data, save_all=False):
        """Compute per-class mean feature vectors ("centroids") over `data`.

        Args:
            data: dataloader yielding (inputs, labels, idxs) triples.
            save_all: if True, additionally pickle every extracted feature,
                label and index to 'feats_all.pkl' in the log dir.

        Returns:
            Tensor of shape (num_classes, feature_dim) on GPU holding the
            class-wise average of features produced by `batch_forward`.
        """
        centroids = torch.zeros(self.training_opt['num_classes'],
                                   self.training_opt['feature_dim']).cuda()

        print('Calculating centroids.')

        torch.cuda.empty_cache()
        # Inference only: freeze BN/dropout on every sub-network.
        for net in self.networks.values():
            net.eval()

        feats_all, labels_all, idxs_all = [], [], []

        # Accumulate feature sums per class; no gradients needed.
        with torch.set_grad_enabled(False):
            for inputs, labels, idxs in tqdm(data):
                inputs, labels = inputs.cuda(), labels.cuda()

                # Extract features for this batch (stored on self.features).
                self.batch_forward(inputs, feature_ext=True)
                # Add each sample's feature to its class's running sum.
                for feat, label in zip(self.features, labels):
                    centroids[label] += feat
                # Optionally keep raw per-sample features for later dumping.
                if save_all:
                    feats_all.append(self.features.cpu().numpy())
                    labels_all.append(labels.cpu().numpy())
                    idxs_all.append(idxs.numpy())

        if save_all:
            fname = os.path.join(self.training_opt['log_dir'], 'feats_all.pkl')
            with open(fname, 'wb') as f:
                pickle.dump({'feats': np.concatenate(feats_all),
                             'labels': np.concatenate(labels_all),
                             'idxs': np.concatenate(idxs_all)},
                            f)
        # Turn sums into means by dividing by per-class sample counts.
        counts = torch.tensor(class_count(data)).float().unsqueeze(1).cuda()
        centroids /= counts

        return centroids

    def get_knncentroids(self):
        """Compute KNN-classifier centroids over the 'train_plain' split.

        Returns a dict with:
            'mean':   global feature mean (numpy vector),
            'uncs':   unnormalized per-class centroids,
            'l2ncs':  centroids of L2-normalized features,
            'cl2ncs': centroids of mean-centered, L2-normalized features.
        """
        datakey = 'train_plain'
        assert datakey in self.data

        print('===> Calculating KNN centroids.')

        torch.cuda.empty_cache()
        # Inference only: freeze BN/dropout on every sub-network.
        for net in self.networks.values():
            net.eval()

        feats_all, labels_all = [], []

        # Extract features for the whole split without gradients.
        with torch.set_grad_enabled(False):
            for inputs, labels, idxs in tqdm(self.data[datakey]):
                inputs, labels = inputs.cuda(), labels.cuda()
                self.batch_forward(inputs, feature_ext=True)
                feats_all.append(self.features.cpu().numpy())
                labels_all.append(labels.cpu().numpy())

        feats = np.concatenate(feats_all)
        labels = np.concatenate(labels_all)
        featmean = feats.mean(axis=0)

        def class_means(feats_, labels_):
            # Mean feature per class, classes in ascending label order.
            return np.stack([np.mean(feats_[labels_ == c], axis=0)
                             for c in np.unique(labels_)])

        # Unnormalized centroids.
        un_centers = class_means(feats, labels)

        # Centroids of L2-normalized features.
        l2n_feats = torch.Tensor(feats.copy())
        l2n_feats = l2n_feats / torch.norm(l2n_feats, 2, 1, keepdim=True)
        l2n_centers = class_means(l2n_feats.numpy(), labels)

        # Centroids of centered-then-L2-normalized features.
        cl2n_feats = torch.Tensor(feats.copy()) - torch.Tensor(featmean)
        cl2n_feats = cl2n_feats / torch.norm(cl2n_feats, 2, 1, keepdim=True)
        cl2n_centers = class_means(cl2n_feats.numpy(), labels)

        return {'mean': featmean,
                'uncs': un_centers,
                'l2ncs': l2n_centers,   
                'cl2ncs': cl2n_centers}
    
    def reset_model(self, model_state):
        for key, model in self.networks.items():
            weights = model_state[key]
            weights = {k: weights[k] for k in weights if k in model.state_dict()}
            model.load_state_dict(weights)

    def load_model(self, model_dir=None):
        model_dir = self.training_opt['log_dir'] if model_dir is None else model_dir
        if not model_dir.endswith('.pth'):
            model_dir = os.path.join(model_dir, 'final_model_checkpoint.pth')
        
        print('Validation on the best model.')
        print('Loading model from %s' % (model_dir))
        
        checkpoint = torch.load(model_dir)
        if 'state_dict_best' in checkpoint:
            model_state = checkpoint['state_dict_best']
        else:
            model_state = checkpoint
        
        self.centroids = checkpoint['centroids'] if 'centroids' in checkpoint else None
        
        for key, model in self.networks.items():
            # if not self.test_mode and key == 'classifier':
            if not self.test_mode and \
                'DotProductClassifier' in self.config['networks'][key]['def_file']:
                # Skip classifier initialization 
                print('Skiping classifier initialization')
                continue
            weights = model_state[key]
            weights = {k: weights[k] for k in weights if k in model.state_dict()}
            x = model.state_dict()
            x.update(weights)
            model.load_state_dict(x)
        if self.add_bt > 0 and self.add_bt < 50:
            self.encoder_layers.load_state_dict(model_state['bt'])
    
    def save_latest(self, epoch):
        model_weights = {}
        model_weights['feat_model'] = copy.deepcopy(self.networks['feat_model'].state_dict())
        model_weights['classifier'] = copy.deepcopy(self.networks['classifier'].state_dict())
        if self.add_bt > 0 and self.add_bt < 50:
            model_weights['bt'] = copy.deepcopy(self.encoder_layers.state_dict())

        model_states = {
            'epoch': epoch,
            'state_dict': model_weights
        }

        model_dir = os.path.join(self.training_opt['log_dir'], 
                                 'latest_model_checkpoint.pth')
        torch.save(model_states, model_dir)
        
    def save_model(self, epoch, best_epoch, best_model_weights, best_acc, centroids=None):
        
        model_states = {'epoch': epoch,
                'best_epoch': best_epoch,
                'state_dict_best': best_model_weights,
                'best_acc': best_acc,
                'centroids': centroids}

        model_dir = os.path.join(self.training_opt['log_dir'], 
                                 'final_model_checkpoint.pth')

        torch.save(model_states, model_dir)
            
    def output_logits(self, openset=False):
        filename = os.path.join(self.training_opt['log_dir'], 
                                'logits_%s'%('open' if openset else 'close'))
        print("Saving total logits to: %s.npz" % filename)
        np.savez(filename, 
                 logits=self.total_logits.detach().cpu().numpy(), 
                 labels=self.total_labels.detach().cpu().numpy(),
                 paths=self.total_paths)
