import os
import yaml
from types import SimpleNamespace
from collections import OrderedDict

from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
#from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, WeightedRandomSampler
from torch import optim
import torchvision.transforms as T
from sklearn import metrics
import matplotlib.pyplot as plt

from .BaseTrainer import BaseTrainer
from data.datasets import get_weight_list, AortaDataset
from utils.ranger import Ranger
from utils.lr_scheduler import CosineAnnealingWithWarmUpLR
from model.SupCon import *
import data.transforms as MT
from model.vgg import vgg16_bn

class CETrainer(BaseTrainer):
    """Cross-entropy trainer for aorta-image classification.

    Reads hyper-parameters from a YAML options file, builds the
    train/val/test ``DataLoader``s, a VGG16-BN backbone, the optimizer,
    LR scheduler and loss, then drives the training loop via :meth:`train`
    and evaluation via :meth:`evaluate`.
    """

    def __init__(self, opt_file='args/ce.yaml'):
        """Build every training component from the options in ``opt_file``.

        Args:
            opt_file: Path to a YAML file of hyper-parameters; its keys are
                exposed attribute-style through ``self.opt``.
        """
        with open(opt_file) as f:
            opt = yaml.safe_load(f)
            opt = SimpleNamespace(**opt)
        self.opt = opt

        # BaseTrainer is expected to set up self.logger, self.writer and
        # self.checkpoint_dir -- TODO confirm against BaseTrainer.
        super(CETrainer, self).__init__(checkpoint_root='CE', opt=opt)
        # Echo the raw option file into the log for reproducibility.
        with open(opt_file) as f:
            self.logger.info(f'{opt_file} START************************\n'
            f'{f.read()}\n'
            f'************************{opt_file} END**************************\n')

        # Device selection. NOTE(review): setting CUDA_VISIBLE_DEVICES here
        # only takes effect if CUDA has not been initialized yet.
        if opt.device == 'cpu':
            self.device = torch.device('cpu')
        else:
            os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.device)
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.logger.info(f'Using device {self.device}')

        # Augmented pipeline for training, deterministic one for val/test.
        tt_list = [
            T.Resize(opt.image_size),
            T.CenterCrop(opt.image_size),
            T.RandomHorizontalFlip(p=0.5),
            T.RandomVerticalFlip(p=0.5),
            T.RandomApply([T.ColorJitter(0.4, 0.4)], p=0.7),
            T.RandomApply([T.RandomRotation(45, T.InterpolationMode.NEAREST)], p=0.4),
            T.ToTensor(),
        ]
        vt_list = [
            T.Resize(opt.image_size),
            T.CenterCrop(opt.image_size),
            T.ToTensor(),
        ]
        if opt.sobel:
            # Appends a Sobel-edge channel after ToTensor (custom transform).
            tt_list.append(MT.SobelChannel(3))
            vt_list.append(MT.SobelChannel(3))
        self.train_transform = T.Compose(tt_list)
        self.val_transform = T.Compose(vt_list)

        # Training set uses a weighted sampler to counter class imbalance.
        self.train_dataset = AortaDataset(os.path.join(opt.source, 'train'), opt.cate, self.train_transform)
        self.n_train = len(self.train_dataset)
        train_sampler = WeightedRandomSampler(get_weight_list(self.train_dataset), self.n_train)
        self.train_loader = DataLoader(self.train_dataset,
                                       batch_size=opt.batch_size,
                                       sampler=train_sampler,
                                       drop_last=False,
                                       num_workers=8,
                                       pin_memory=True)

        self.val_dataset = AortaDataset(os.path.join(opt.source, 'val'), opt.cate, self.val_transform)
        self.n_val = len(self.val_dataset)
        self.val_loader = DataLoader(self.val_dataset,
                                     batch_size=opt.batch_size,
                                     shuffle=False,
                                     drop_last=False,
                                     num_workers=8,
                                     pin_memory=True)

        self.test_dataset = AortaDataset(os.path.join(opt.source, 'test'), opt.cate, self.val_transform)
        self.n_test = len(self.test_dataset)
        self.test_loader = DataLoader(self.test_dataset,
                                      batch_size=opt.batch_size,
                                      shuffle=False,
                                      drop_last=False,
                                      num_workers=8,
                                      pin_memory=True)

        # Backbone: VGG16-BN; 2 input channels when the Sobel channel is on.
        self.net = vgg16_bn(in_channels=2 if opt.sobel else 1, num_classes=opt.n_classes)
        if opt.load_model:
            self.net.load_state_dict(torch.load(opt.load_model, map_location=self.device))
            self.logger.info(f'Model loaded from {opt.load_model}')
        self.net.to(device=self.device)
        if torch.cuda.device_count() > 1 and self.device.type != 'cpu':
            self.net = nn.DataParallel(self.net)
            self.logger.info(f'torch.cuda.device_count:{torch.cuda.device_count()}, Use nn.DataParallel')
        # Unwrapped module, used for state_dict saving and parameter logging.
        self.net_module = self.net.module if isinstance(self.net, nn.DataParallel) else self.net

        if opt.optimizer.lower() == 'rmsprop':
            self.optimizer = optim.RMSprop(self.net.parameters(), lr=opt.lr, weight_decay=1e-8, momentum=0.9)
        elif opt.optimizer.lower() == 'ranger':
            self.optimizer = Ranger(self.net.parameters(), lr=opt.lr, weight_decay=1e-8)
        else:
            raise NotImplementedError(f'optimizer not supported: {opt.optimizer}')
        if opt.load_optimizer:
            self.optimizer.load_state_dict(torch.load(opt.load_optimizer))
            self.logger.info(f'Optimizer loaded from {opt.load_optimizer}')

        # Cosine annealing with a warm-up phase of epochs//10 epochs.
        self.scheduler = CosineAnnealingWithWarmUpLR(self.optimizer, T_total=opt.epochs, eta_min=opt.lr/1000, warm_up_lr=opt.lr/100, warm_up_step=opt.epochs//10)
        if opt.load_scheduler:
            self.scheduler.load_state_dict(torch.load(opt.load_scheduler))
            self.logger.info(f'Scheduler loaded from {opt.load_scheduler}')

        # Multi-class -> softmax CE; binary (n_classes == 1) -> BCE on logits.
        self.criterion_ce = nn.CrossEntropyLoss() if opt.n_classes > 1 else nn.BCEWithLogitsLoss()

        self.epochs = opt.epochs
        self.save_cp = opt.save_cp
        self.early_stopping = opt.early_stopping
        self.training_info = opt.info

        self.logger.info(f'''Starting training net:
        Epochs:          {opt.epochs}
        Batch size:      {opt.batch_size}
        Learning rate:   {opt.lr}
        Image size:      {opt.image_size}
        Training size:   {self.n_train}
        Validation size: {self.n_val}
        Test size:       {self.n_test}
        Checkpoints:     {opt.save_cp}
        Device:          {self.device.type}
        Data source:     {opt.source}
        Training info:   {opt.info}
    ''')

    def train(self):
        """Run the full training loop.

        Per epoch: optimize over the (weighted-sampled) training set, log
        losses/metrics/histograms to TensorBoard, validate, step the LR
        scheduler, save last/periodic/best checkpoints, and optionally
        early-stop. Afterwards reload the best model and produce final
        val/test reports with PR curves.
        """
        global_step = 0
        best_val_score = float('-inf')
        useless_epoch_count = 0
        for epoch in range(self.opt.start_epoch, self.epochs):
            try:
                self.net.train()
                epoch_loss = 0
                true_list = []
                pred_list = []
                pbar = tqdm(total=self.n_train, desc=f'Epoch {epoch + 1}/{self.epochs}', unit='img')
                for imgs, labels in self.train_loader:
                    global_step += 1
                    true_list += labels.tolist()
                    imgs, labels = imgs.to(self.device), labels.to(self.device)

                    preds = self.net(imgs)

                    if self.opt.n_classes > 1:
                        pred_list += preds.detach().argmax(dim=1).tolist()
                        loss = self.criterion_ce(preds, labels)
                    else:
                        # Binary head: single logit, threshold at 0.
                        pred_list += (preds.detach().squeeze(1) > 0).long().tolist()
                        loss = self.criterion_ce(preds.squeeze(1), labels.float())

                    # (An optional "flood"-loss variant driven by opt.b was
                    # removed here; it was disabled/commented out.)

                    self.writer.add_scalar(f'Train/batch_loss', loss.item(), global_step)
                    # Accumulate a sample-weighted loss so the epoch average
                    # is correct even with a ragged final batch.
                    epoch_loss += loss.item() * labels.size(0)
                    postfix = OrderedDict()
                    postfix['loss'] = loss.item()
                    pbar.set_postfix(postfix)

                    self.optimizer.zero_grad()
                    loss.backward()
                    self.optimizer.step()

                    pbar.update(labels.shape[0])
                pbar.close()

                epoch_loss /= self.n_train
                self.logger.info(f'Train epoch {epoch+1} loss:{epoch_loss}')
                self.writer.add_scalar('Train/epoch_loss', epoch_loss, epoch+1)
                self.logger.info(f'Train epoch {epoch + 1} train report:\n'+metrics.classification_report(true_list, pred_list, digits=4))

                # Weight/gradient histograms from the unwrapped module.
                for tag, value in self.net_module.named_parameters():
                    tag = tag.replace('.', '/')
                    self.writer.add_histogram('weights/' + tag, value.data.cpu().numpy(), epoch+1)
                    if value.grad is not None:
                        self.writer.add_histogram('grads/' + tag, value.grad.data.cpu().numpy(), epoch+1)

                self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]['lr'], epoch+1)

                # Visualize the last batch of the epoch (and its Sobel channel).
                if self.opt.sobel:
                    self.writer.add_images('images/origin', imgs[:,0:1].cpu(), epoch+1, dataformats='NCHW')
                    self.writer.add_images('images/origin_sobel', imgs[:,1:2].cpu(), epoch+1, dataformats='NCHW')
                else:
                    self.writer.add_images('images/origin', imgs.cpu(), epoch+1, dataformats='NCHW')
                # Render true/predicted labels of the last batch as solid
                # color patches (black/red/green/blue per class index).
                color_list = [torch.ByteTensor([0,0,0]), torch.ByteTensor([255,0,0]), torch.ByteTensor([0,255,0]), torch.ByteTensor([0,0,255])]
                labels_img = torch.zeros(labels.shape[0], 100, 100, 3, dtype = torch.uint8)
                preds_img = torch.zeros(preds.shape[0], 100, 100, 3, dtype = torch.uint8)
                preds_idx = preds.argmax(dim=1) if self.opt.n_classes > 1 else (preds.squeeze(1)>0).long()
                for category in range(1, self.opt.n_classes):
                    labels_img[labels==category] = color_list[category]
                    preds_img[preds_idx==category] = color_list[category]
                self.writer.add_images('categories/true', labels_img, epoch+1, dataformats='NHWC')
                self.writer.add_images('categories/pred', preds_img, epoch+1, dataformats='NHWC')

                mAP, val_loss, accuracy = self.evaluate()
                self.logger.info(f'Val epoch {epoch+1} mAP: {mAP}, loss: {val_loss}, accuracy: {accuracy}')
                self.writer.add_scalar('Val/mAP', mAP, epoch+1)
                self.writer.add_scalar('Val/loss', val_loss, epoch+1)
                self.writer.add_scalar('Val/accuracy', accuracy, epoch+1)

                self.scheduler.step()

                # NOTE(review): checkpoint paths are built by string
                # concatenation, so checkpoint_dir is assumed to end with a
                # path separator -- confirm against BaseTrainer.
                if self.save_cp:
                    if (epoch+1) % 5 == 0:
                        torch.save(self.net_module.state_dict(), self.checkpoint_dir + f'Net_epoch{epoch + 1}.pth')
                        self.logger.info(f'Checkpoint {epoch + 1} saved !')
                else:
                    torch.save(self.net_module.state_dict(), self.checkpoint_dir + 'Net_last.pth')
                    self.logger.info('Last model saved !')
                torch.save(self.optimizer.state_dict(), self.checkpoint_dir + 'Optimizer_last.pth')
                torch.save(self.scheduler.state_dict(), self.checkpoint_dir + 'Scheduler_last.pth')

                # Model selection criterion: validation accuracy.
                if accuracy > best_val_score:
                    best_val_score = accuracy
                    torch.save(self.net_module.state_dict(), self.checkpoint_dir + 'Net_best.pth')
                    torch.save(self.optimizer.state_dict(), self.checkpoint_dir + 'Optimizer_best.pth')
                    torch.save(self.scheduler.state_dict(), self.checkpoint_dir + 'Scheduler_best.pth')
                    self.logger.info('Best model saved !')
                    useless_epoch_count = 0
                else:
                    useless_epoch_count += 1

                if self.early_stopping and useless_epoch_count == self.early_stopping:
                    self.logger.info(f'There are {useless_epoch_count} useless epochs! Early Stop Training!')
                    break

            except KeyboardInterrupt:
                self.logger.info('Receive KeyboardInterrupt, stop training...')
                pbar.close()
                break

        # Final report: reload the best checkpoint and evaluate on both splits.
        self.net_module.load_state_dict(torch.load(self.checkpoint_dir + 'Net_best.pth', map_location=self.device))
        mAP, val_loss, accuracy, pr_cur_dir = self.evaluate(type='val', final=True)
        self.logger.info(f'Best model val mAP: {mAP}, loss: {val_loss}, accuracy: {accuracy}')
        self.writer.add_images('PR-curve/Eval', np.array(Image.open(pr_cur_dir)), dataformats='HWC')
        mAP, val_loss, accuracy, pr_cur_dir = self.evaluate(type='test', final=True)
        self.logger.info(f'Best model test mAP: {mAP}, loss: {val_loss}, accuracy: {accuracy}')
        self.writer.add_images('PR-curve/Test', np.array(Image.open(pr_cur_dir)), dataformats='HWC')

    @torch.no_grad()
    def evaluate(self, type='val', final=False):
        """Evaluate the current network on the val or test split.

        Args:
            type: Which split to evaluate, ``'val'`` or ``'test'``.
                (Name shadows the builtin but is kept for caller
                compatibility.)
            final: When True, also draw per-class PR curves to a PNG and
                return its path as a fourth element.

        Returns:
            ``(mAP, avg_loss, accuracy)`` — or, when ``final`` is True,
            ``(mAP, avg_loss, accuracy, pr_curve_path)``.
        """
        if type == 'val':
            dataloader = self.val_loader
            n_sample = self.n_val
        elif type == 'test':
            assert final == True, 'When type is test, final must be True but got False'
            dataloader = self.test_loader
            n_sample = self.n_test
        else:
            raise ValueError(f'type should be val or test, but got {type}')

        self.net.eval()
        tot_loss = 0
        true_list = []
        pred_list = []
        pred_ori_list = []   # per-sample class-probability vectors, for AP
        with tqdm(total=n_sample, desc=f'{type} round', unit='img', leave=False) as pbar:
            for imgs, labels in dataloader:
                imgs, labels = imgs.to(self.device), labels.to(self.device)
                preds = self.net(imgs)
                if self.opt.n_classes > 1:
                    tot_loss += F.cross_entropy(preds, labels).item() * labels.size(0)
                    pred_idx = torch.softmax(preds, dim=1)
                else:
                    # Binary head: expand the single sigmoid output into
                    # a two-column [P(neg), P(pos)] matrix.
                    tot_loss += F.binary_cross_entropy_with_logits(preds.squeeze(1), labels.float()).item() * labels.size(0)
                    pred_sig = torch.sigmoid(preds)
                    pred_idx = torch.cat([1-pred_sig, pred_sig], dim=1)
                pred_ori_list += pred_idx.tolist()
                pred_idx = pred_idx.argmax(dim=1)
                labels_list = labels.tolist()
                true_list += labels_list
                pred_idx = pred_idx.tolist()
                pred_list.extend(pred_idx)
                pbar.update(labels.size(0))
        self.net.train()

        # One-vs-rest average precision per class; optionally plot PR curves.
        AP = []
        if final:
            plt.figure("P-R Curve")
            plt.title(f'{type} Precision/Recall Curve')
            plt.xlabel('Recall')
            plt.ylabel('Precision')
        for c in range(self.opt.n_classes):
            c_true_list = [int(item==c) for item in true_list]
            c_pred_ori_list = [item[c] for item in pred_ori_list]
            AP.append(metrics.average_precision_score(c_true_list, c_pred_ori_list))
            if final:
                c_precision, c_recall, _ = metrics.precision_recall_curve(c_true_list, c_pred_ori_list)
                plt.plot(c_recall, c_precision, label=f'class {c}')
        if final:
            plt.ylim(bottom=0)
            plt.legend() #plt.legend(loc="lower left")
            plt.savefig(os.path.join(self.checkpoint_dir, f'{type}_PR-curve.png'))
            plt.close()

        self.logger.info(f'{"Best model " if final else ""}{type} report:\n'+metrics.classification_report(true_list, pred_list, digits=4))

        mAP = float(np.mean(AP))
        accuracy = metrics.accuracy_score(true_list, pred_list)
        # BUG FIX: was tot_loss/self.n_val, which mis-scaled the reported
        # loss for the test split; divide by the evaluated split's size.
        avg_loss = tot_loss / n_sample
        if final:
            return mAP, avg_loss, accuracy, os.path.join(self.checkpoint_dir, f'{type}_PR-curve.png')
        return mAP, avg_loss, accuracy

    def __del__(self):
        # Drop the DataLoaders (and their worker processes) before the
        # base class tears down its own resources.
        del self.train_loader, self.val_loader, self.test_loader
        super(CETrainer, self).__del__()
