import itertools
import random
import time
import copy
import os
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from PIL import Image
from src.trainer import BaseTrainer, find_instance_lvl_strategy, find_dataset_lvl_strategy
from src.utils.image_utils import add_heatmap_to_image, img_color_denormalize, draw
from src.models.mvselect import aggregate_feat, get_eps_thres
from src.loss import VGGLOSS
from src.utils.meters import PSNRMeter,SSIMMeter,LPIPSMeter,AverageMeter
from torchmetrics.functional import structural_similarity_index_measure
import torchvision
class PatchSelection:
    """Select square patches from images for patch-based training.

    The image (assumed square, side ``img_resize``) is tiled into a grid of
    non-overlapping ``patch_size`` x ``patch_size`` patches; ``patch_sel_num``
    of them are selected per image (all of them when ``patch_sel_num`` is
    ``None``).
    """

    def __init__(self, patch_size, img_resize, patch_sel_num=None) -> None:
        """
        Args:
            patch_size: side length of each square patch, in pixels.
            img_resize: side length of the (square) input images, in pixels.
            patch_sel_num: number of patches to select per image; ``None``
                means select every patch in the grid.
        """
        self.patch_size = patch_size
        self.img_resize = img_resize

        # Grid dimensions: patches are laid out row-major over the image.
        self.patch_w_num = int(self.img_resize / self.patch_size)
        self.patch_h_num = int(self.img_resize / self.patch_size)
        self.patch_num = self.patch_w_num * self.patch_h_num
        if patch_sel_num is None:
            self.patch_sel_num = self.patch_num
        else:
            self.patch_sel_num = patch_sel_num

    def get_row_col(self, n, k):
        """Convert flat (row-major) patch index ``k`` to (row, col) in an
        ``n``-column grid."""
        row = k // n
        col = k % n
        return row, col

    def sel_method(self, method="random"):
        """Return a 1-D tensor of ``patch_sel_num`` selected patch indices.

        Args:
            method: selection strategy; only "random" (uniform sampling
                without replacement) is currently implemented.

        Raises:
            NotImplementedError: for the planned but unimplemented "rl" method.
            ValueError: for any unknown method name.
        """
        if method == "random":
            random_ints = random.sample(range(self.patch_num), self.patch_sel_num)
            patch_sel_ind = torch.tensor(random_ints)
        elif method == "rl":
            # BUGFIX: this branch was `pass`, so the function fell through to
            # `return patch_sel_ind` with the name unbound (UnboundLocalError).
            # Fail loudly with a clear message instead.
            raise NotImplementedError("RL-based patch selection is not implemented yet")
        else:
            raise ValueError(f"unknown patch selection method: {method!r}")
        return patch_sel_ind

    def sel_patch(self, imgs, tgts, method="random"):
        """Select patches independently for every (batch, view) image pair.

        Args:
            imgs: input images, shape (B, N, C, H, W).
            tgts: target images, same shape as ``imgs``.
            method: selection strategy forwarded to :meth:`sel_method`.

        Returns:
            Tuple of
              - patch_sel_inds: (B, N, patch_sel_num) selected flat indices,
              - patch_masks: (B, N, C, H, W) bool mask, True inside patches,
              - patch_crops_imgs: (B, N, patch_sel_num, C, p, p) image crops,
              - patch_crops_tgts: (B, N, patch_sel_num, C, p, p) target crops.
        """
        B, N, C, H, W = imgs.shape
        patch_masks = []
        patch_crops_imgs = []
        patch_crops_tgts = []
        patch_sel_inds = []
        for b in range(B):
            for n in range(N):
                img = imgs[b, n, ...]
                tgt = tgts[b, n, ...]
                patch_crops_img = []
                patch_crops_tgt = []
                patch_mask = torch.zeros_like(img)
                patch_sel_ind = self.sel_method(method=method)
                patch_sel_inds.append(patch_sel_ind)
                for sel_ind in patch_sel_ind:
                    row, col = self.get_row_col(self.patch_w_num, sel_ind)
                    patch_mask[:, row*self.patch_size:(row+1)*self.patch_size, col*self.patch_size:(col+1)*self.patch_size] = 1
                    patch_crops_img.append(img[:, row*self.patch_size:(row+1)*self.patch_size, col*self.patch_size:(col+1)*self.patch_size])
                    patch_crops_tgt.append(tgt[:, row*self.patch_size:(row+1)*self.patch_size, col*self.patch_size:(col+1)*self.patch_size])
                patch_masks.append(patch_mask)
                patch_crops_img = torch.stack(patch_crops_img)
                patch_crops_tgt = torch.stack(patch_crops_tgt)
                patch_crops_imgs.append(patch_crops_img)
                patch_crops_tgts.append(patch_crops_tgt)
        patch_masks = torch.stack(patch_masks).bool()
        patch_sel_inds = torch.stack(patch_sel_inds)
        patch_crops_imgs = torch.stack(patch_crops_imgs)
        patch_crops_tgts = torch.stack(patch_crops_tgts)

        # Restore the (B, N) leading dims flattened by the loops above.
        patch_masks = patch_masks.unflatten(0, [B, N])
        patch_sel_inds = patch_sel_inds.unflatten(0, [B, N])
        patch_crops_imgs = patch_crops_imgs.unflatten(0, [B, N])
        patch_crops_tgts = patch_crops_tgts.unflatten(0, [B, N])
        return patch_sel_inds, patch_masks, patch_crops_imgs, patch_crops_tgts

    def crop_neural_texture(self, neural_texture, patch_sel_inds):
        """Crop a neural-texture map at the previously selected patch locations.

        Args:
            neural_texture: feature map, shape (B, C, H, W).
            patch_sel_inds: (B, pN) flat patch indices per batch element.

        Returns:
            Tensor of shape (B, pN, C, patch_size, patch_size).
        """
        B, pN = patch_sel_inds.shape
        patch_crops_neural_texture = []
        for b in range(B):
            patch_sel_ind = patch_sel_inds[b,]
            for sel_ind in patch_sel_ind:
                row, col = self.get_row_col(self.patch_w_num, sel_ind)
                patch_crops_neural_texture.append(neural_texture[b, :, row*self.patch_size:(row+1)*self.patch_size, col*self.patch_size:(col+1)*self.patch_size])
        patch_crops_neural_texture = torch.stack(patch_crops_neural_texture)
        patch_crops_neural_texture = patch_crops_neural_texture.unflatten(0, [B, pN])
        return patch_crops_neural_texture

    def assign_output_patch_to_image(self, outputs, patch_sel_inds):
        """Scatter per-patch predictions back into the full output image.

        Writes ``outputs["outputs_patches"][b, p]`` into the matching patch
        region of ``outputs["outputs"]`` (in place) for every selected index.

        Args:
            outputs: dict with "outputs" (B, C, H, W) and
                "outputs_patches" (B, pN, C, patch_size, patch_size).
            patch_sel_inds: (B, pN) flat patch indices.

        Returns:
            The same ``outputs`` dict, with "outputs" updated in place.
        """
        B, pN = patch_sel_inds.shape
        for b in range(B):
            for p in range(pN):
                row, col = self.get_row_col(self.patch_w_num, patch_sel_inds[b, p])
                outputs["outputs"][b, :, row*self.patch_size:(row+1)*self.patch_size, col*self.patch_size:(col+1)*self.patch_size] = outputs["outputs_patches"][b, p, ...]
        return outputs

class DNRTrainer(BaseTrainer):
    """Trainer for a deferred-neural-rendering style model.

    Combines several photometric losses (smooth-L1, SSIM, FFT, gradient
    difference, VGG) over masked renderings, an optional texture regularizer,
    optional patch-based training through ``PatchSelection``, and an RL-style
    camera-selection episode when ``args.steps`` > 0.
    """

    def __init__(self, model, logdir, args, ):
        super(DNRTrainer, self).__init__(model, logdir, args, )
        self.criterionL1 = torch.nn.L1Loss(reduction='mean')
        self.criterionL1Smooth = torch.nn.SmoothL1Loss(reduction='mean')
        self.criterionL2 = torch.nn.MSELoss(reduction='mean')

        # Evaluation metrics; cleared here and again at the start of test().
        self.metrics = {
            "loss":AverageMeter(),
            "psnr":PSNRMeter(),
            "ssim":SSIMMeter(device="cuda"),
            "lpips":LPIPSMeter(device="cuda")
        }
        for metric_name,metric in self.metrics.items():
            metric.clear()

        if self.args.lambda_VGG > 0.0:
            print('initialize VGG loss')
            self.vggloss = VGGLOSS().to("cuda")

        # Patch selection is used for training only; validation currently
        # runs on full images (patch_sel_model_val deliberately None).
        if self.args.patch_size is not None:
            self.patch_sel_model = PatchSelection(self.args.patch_size, self.args.img_resize, self.args.patch_sel_num)
            # self.patch_sel_model_val = PatchSelection(self.args.patch_size, self.args.img_resize, None)
            self.patch_sel_model_val = None
        else:
            self.patch_sel_model = None
            self.patch_sel_model_val = None

    def task_loss(self, output, tgt):
        """Weighted sum of all enabled rendering losses for one prediction.

        Args:
            output: dict with at least "outputs" (predicted image) and
                "sel_masks" (validity mask, single channel); "texture" is read
                only when ``args.lambda_L1_Texture`` > 0.
            tgt: target image tensor, same spatial size as the prediction.

        Returns:
            Scalar loss tensor (sum of the individually weighted terms; 0.0
            for terms whose lambda is <= 0).
        """
        pred = output["outputs"]
        # NOTE(review): a 3-D mask gets a leading dim via unsqueeze(0) —
        # presumably (C,H,W) -> (1,C,H,W); confirm against the model output.
        mask = output["sel_masks"].unsqueeze(0) if len(output["sel_masks"].shape)==3 else output["sel_masks"]
        eps = 0.0001
        # eps keeps mask_weight finite when the mask is all-zero.
        sum_mask = torch.sum(mask) + eps
        num_pixels_max = mask.shape[-2] * mask.shape[-1]
        # Compensates for the fraction of pixels masked out: each per-pixel
        # mean loss below is divided by this weight.
        mask_weight = (num_pixels_max) / sum_mask
        # Replicate the single-channel mask across the three color channels.
        mask = torch.cat([mask,mask,mask], 1)
        def masked(img):
            # Zero out pixels outside the selection mask.
            return torch.where(mask, img, torch.zeros_like(img))

        ## absolute rendering error on the raw neural texture
        loss_texture_L1 = 0.0
        if self.args.lambda_L1_Texture>0.0:
            texture = output["texture"]
            loss_texture_L1 += self.args.lambda_L1_Texture * self.criterionL1Smooth(masked(texture), masked(tgt) ) / mask_weight

        ## structural similarity on the masked prediction
        loss_ssim = 0.0
        if self.args.lambda_SSIM>0.0:
            loss_ssim += self.args.lambda_SSIM * (1 - structural_similarity_index_measure(masked(pred), masked(tgt) )) / mask_weight

        ## absolute rendering error on the final output
        loss_G_L1 = 0.0
        if self.args.lambda_L1>0.0:
            loss_G_L1 += self.args.lambda_L1 * self.criterionL1Smooth(masked(pred), masked(tgt) ) / mask_weight

        def get_rfft(input):
            # Full (not real-input) 2-D FFT; real/imag parts stacked on a
            # trailing axis so SmoothL1 can compare them as real tensors.
            output = torch.fft.fft2(input, dim=(-2, -1))
            output = torch.stack((output.real, output.imag), -1)
            return output

        ## frequency-domain error
        loss_fft = 0.0
        if self.args.lambda_fft>0.0:
            pred_fft = get_rfft(masked(pred))
            tgt_fft = get_rfft(masked(tgt))
            loss_fft += self.args.lambda_fft * self.criterionL1Smooth(pred_fft, tgt_fft) / mask_weight

        ## loss based on image differences (more invariant to color shifts)
        loss_G_L1_Diff = 0.0
        if self.args.lambda_L1_Diff>0.0:
            fake_diff_x = pred[:,:,:,1:] - pred[:,:,:,:-1]
            fake_diff_y = pred[:,:,1:,:] - pred[:,:,:-1,:]
            target_masked = masked(tgt)
            target_diff_x = target_masked[:,:,:,1:] - target_masked[:,:,:,:-1]
            target_diff_y = target_masked[:,:,1:,:] - target_masked[:,:,:-1,:]

            # Total variation of the masked target, added as an extra penalty.
            h_tv = torch.pow((target_masked[:,:,1:,:] - target_masked[:,:,:-1,:]),2).sum()
            w_tv = torch.pow((target_masked[:,:,:,1:] - target_masked[:,:,:,:-1]),2).sum()

            loss_G_L1_Diff += self.args.lambda_L1_Diff * self.criterionL1Smooth(fake_diff_x, target_diff_x) / mask_weight
            loss_G_L1_Diff += self.args.lambda_L1_Diff * self.criterionL1Smooth(fake_diff_y, target_diff_y) / mask_weight

            def _tensor_size(t):
                # Element count excluding the batch dim (C*H*W).
                return t.size()[1]*t.size()[2]*t.size()[3]

            count_h = _tensor_size(target_masked[:,:,1:,:])
            count_w = _tensor_size(target_masked[:,:,:,1:])
            loss_G_L1_Diff += self.args.lambda_L1_Diff * 2 *(h_tv/count_h+w_tv/count_w)

        ## VGG loss
        loss_G_VGG = 0.0
        if self.args.lambda_VGG>0.0:
            loss_G_VGG += self.args.lambda_VGG * self.vggloss(masked(pred), masked(tgt))

        ## texture regularizer
        # BUGFIX: loss_G_TexReg was only assigned inside the `if`, raising an
        # UnboundLocalError whenever lambda_Reg_Tex <= 0; initialize it to 0.0
        # like every other term.
        loss_G_TexReg = 0.0
        if self.args.lambda_Reg_Tex>0.0:
            loss_G_TexReg += self.args.lambda_Reg_Tex * self.model.texture.regularizer()
        task_loss = loss_G_L1 + loss_G_L1_Diff + loss_G_VGG + loss_G_TexReg + loss_texture_L1 + loss_ssim + loss_fft

        return task_loss

    def task_loss_reward(self, overall_feat, tgt, step, sel_ind):
        """Compute the task loss for one RL step and its (negated) reward.

        Args:
            overall_feat: aggregated feature passed to the model.
            tgt: targets of shape (B, N, C, H, W); view ``sel_ind`` is used.
            step: current step index within the episode (currently unused).
            sel_ind: index of the selected camera/view.

        Returns:
            Tuple (task_loss, reward) where reward = -task_loss (detached).
        """
        output = self.model.get_output(overall_feat, sel_ind)
        task_loss = self.task_loss(output=output,tgt=tgt[:,sel_ind,:,:,:].squeeze(1))

        # Reward is given at every step, not only the final one
        # (the step-gated variant is kept below for reference).
        # if step < self.args.steps - 1:
        #     reward = torch.zeros_like(task_loss).cuda()
        # else:
        #     reward = -task_loss.detach()
        reward = -task_loss.detach()
        # reward = task_loss.detach()
        return task_loss, reward

    def train(self, epoch, dataloader, optimizer, scheduler=None, log_interval=50):
        """Run one training epoch.

        Args:
            epoch: 1-based epoch index (used for eps schedule and schedulers).
            dataloader: yields dict batches with keys 'imgs', 'tgts',
                'keep_cams', 'imgs_name', 'extrinsics'.
            optimizer: torch optimizer for the model parameters.
            scheduler: optional LR scheduler (OneCycle / cosine-restart /
                lambda schedulers are stepped per batch).
            log_interval: print progress every this many batches.

        Returns:
            Tuple (mean loss over the epoch, None).
        """
        self.model.train()
        # Freeze the backbone when its LR ratio is zero.
        if self.args.base_lr_ratio == 0:
            self.model.base.eval()
        losses = 0
        t0 = time.time()
        action_sum = torch.zeros([dataloader.dataset.num_cam]).cuda()
        return_avg = None
        epoch_time_meter = AverageMeter()
        for batch_idx, batch_data in enumerate(dataloader):
            imgs = batch_data['imgs']
            tgt = batch_data['tgts']
            keep_cams = batch_data['keep_cams']
            imgs_name = batch_data['imgs_name']
            extrinsics = batch_data['extrinsics']
            B, N = imgs.shape[:2]
            imgs = imgs.cuda() #BNCHW
            tgt = tgt.cuda()
            if self.patch_sel_model is not None:
                # Blank out non-selected patches (-1.0 == normalized black,
                # presumably; inputs appear to be in [-1, 1] — see the
                # (x+1)/2 rescaling used for saving/metrics below).
                patch_sel_inds,patch_masks,patch_crops_imgs,patch_crops_tgts = self.patch_sel_model.sel_patch(imgs, tgt)
                imgs[patch_masks==0] = -1.0
                tgt[patch_masks==0] = -1.0
            start_epoch = time.time()
            # torchvision.utils.save_image((imgs[0,0]+1)/2.0, 'imgs.png')
            # torchvision.utils.save_image(patch_masks[0,0].float(), 'masks.png')
            # torchvision.utils.save_image((tgt[0,0]+1)/2.0, 'tgt.png')
            # breakpoint()

            feat, _ = self.model.get_feat(imgs, extrinsics)

            if self.args.steps:
                # RL camera-selection episode with epsilon-greedy exploration.
                eps_thres = get_eps_thres(epoch - 1 + batch_idx / len(dataloader), self.args.epochs)
                loss, (action_sum, return_avg, value_loss) = \
                    self.expand_episode(feat, keep_cams, tgt, eps_thres, (action_sum, return_avg))
            else:
                overall_feat,sel_ind = aggregate_feat(feat, keep_cams, self.args.aggregation)
                tgt = tgt[:,sel_ind,:,:,:]

                if self.patch_sel_model is not None:
                    # Render only the selected patches and scatter them back.
                    patch_sel_inds = patch_sel_inds[:,sel_ind]
                    feat_crops = self.patch_sel_model.crop_neural_texture(overall_feat, patch_sel_inds)
                    output = self.model.get_crop_output(overall_feat, sel_ind, feat_crops)
                    output = self.patch_sel_model.assign_output_patch_to_image(output, patch_sel_inds)
                    # torchvision.utils.save_image((output["outputs"]+1)/2.0, 'output.png')
                    # torchvision.utils.save_image((output["texture"]+1)/2.0, 'texture.png')
                    # breakpoint()
                    # exit()
                else:
                    output = self.model.get_output(overall_feat, sel_ind)
                loss = self.task_loss(output=output,tgt=tgt)
            epoch_time_meter.update(time.time()-start_epoch)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            losses += loss.item()

            if scheduler is not None:
                if isinstance(scheduler, torch.optim.lr_scheduler.OneCycleLR):
                    scheduler.step()
                elif isinstance(scheduler, torch.optim.lr_scheduler.CosineAnnealingWarmRestarts) or \
                        isinstance(scheduler, torch.optim.lr_scheduler.LambdaLR):
                    # Fractional-epoch step for cosine/lambda schedulers.
                    scheduler.step(epoch - 1 + batch_idx / len(dataloader))
            # logging
            if (batch_idx + 1) % log_interval == 0 or batch_idx + 1 == len(dataloader):
                # print(cyclic_scheduler.last_epoch, optimizer.param_groups[0]['lr'])
                t1 = time.time()
                t_epoch = t1 - t0
                print(f'Train epoch: {epoch}, batch:{(batch_idx + 1)}, '
                      f'loss: {losses / (batch_idx + 1):.3f}, time: {t_epoch:.1f}, batch avg time: {epoch_time_meter.measure():.3f}')
                if self.args.steps:
                    print(f'value loss: {value_loss:.3f}, eps: {eps_thres:.3f}, return: {return_avg[-1]:.2f}')
                    # print(f'value loss: {value_loss:.3f}, policy loss: {policy_loss:.3f}, '
                    #       f'return: {return_avg[-1]:.2f}, entropy: {entropies.mean():.3f}')
                    print(' '.join('cam {} {:.2f} |'.format(cam, freq) for cam, freq in
                                   zip(range(N), F.normalize(action_sum, p=1, dim=0).cpu())))
                pass
        print("Epoch Train time: %f sec"%(epoch_time_meter.measure()*len(dataloader)))
        return losses / len(dataloader), None

    def test(self, dataloader, test_type="Val", is_eval=False):
        """Evaluate on a dataloader, updating loss/PSNR/SSIM/LPIPS metrics.

        Args:
            dataloader: yields dict batches like :meth:`train`.
            test_type: label used in printed output and saved visualizations.
            is_eval: when True together with ``args.steps``, runs the RL
                episode in evaluation mode and returns the camera-frequency
                ranking instead of metrics.

        Returns:
            Either (mean loss, [psnr, ssim, lpips]) or, in the RL-eval path,
            a list of (camera, frequency) pairs sorted by frequency.
        """
        t0 = time.time()
        self.model.eval()
        for metric_name,metric in self.metrics.items():
            metric.clear()
        for batch_idx, batch_data in enumerate(dataloader):
            imgs = batch_data['imgs']
            tgt = batch_data['tgts']
            keep_cams = batch_data['keep_cams']
            imgs_name = batch_data['imgs_name']
            extrinsics = batch_data['extrinsics']
            imgs, tgt = imgs.cuda(), tgt.cuda()
            if self.patch_sel_model_val is not None:
                patch_sel_inds,patch_masks,patch_crops_imgs,patch_crops_tgts = self.patch_sel_model_val.sel_patch(imgs, tgt)
                imgs[patch_masks==0] = -1.0
                tgt[patch_masks==0] = -1.0
            B, N = imgs.shape[:2]
            with torch.no_grad():
                # if self.args.steps == 0 or init_cam is None:
                feat, _ = self.model.get_feat(imgs, extrinsics)
                if self.args.steps and is_eval:
                    # Greedy (eps=0) episode; report the camera usage ranking.
                    eps_thres = 0
                    action_sum = torch.zeros([dataloader.dataset.num_cam]).cuda()
                    return_avg = None
                    loss, (action_sum, return_avg, value_loss) = \
                        self.expand_episode(feat, keep_cams, tgt, eps_thres, (action_sum, return_avg), is_eval=is_eval)
                    print(f'value loss: {value_loss:.3f}, eps: {eps_thres:.3f}, return: {return_avg[-1]:.2f}')
                    action_dict = {cam: freq for cam, freq in zip(range(N), F.normalize(action_sum, p=1, dim=0).cpu())}
                    action_dict = sorted(action_dict.items(),key=lambda s:s[1], reverse=True)
                    return action_dict
                else:
                    overall_feat, sel_ind = aggregate_feat(feat, keep_cams, "first")
                    tgt = tgt[:,sel_ind,:,:,:]

                    if self.patch_sel_model_val is not None:
                        patch_sel_inds = patch_sel_inds[:,sel_ind]
                        feat_crops = self.patch_sel_model_val.crop_neural_texture(overall_feat, patch_sel_inds)
                        output = self.model.get_crop_output(overall_feat, sel_ind, feat_crops)
                        output = self.patch_sel_model_val.assign_output_patch_to_image(output, patch_sel_inds)
                        # torchvision.utils.save_image((output["outputs"]+1)/2.0, 'output.png')
                        # torchvision.utils.save_image((output["outputs_patches"][0,12,...]+1)/2.0, 'crop.png')
                        # exit()
                    else:
                        output = self.model.get_output(overall_feat, sel_ind)
                    loss = self.task_loss(output=output,tgt=tgt).item()

                self.metrics["loss"].update(loss)

                pred = output["outputs"]
                mask = output["sel_masks"].unsqueeze(0) if len(output["sel_masks"].shape)==3 else output["sel_masks"]
                mask = torch.cat([mask,mask,mask], 1)

                # if batch_idx % 100 == 0:
                save_path = os.path.join(self.logdir, "visual", "%s_%s"%(test_type,imgs_name[0][sel_ind]))
                draw(pred,tgt,feat[:,sel_ind,:,:,:],imgs[:,sel_ind,:,:,:],mask,save_path)

                def masked(img):
                    # Fill non-selected pixels with -1 (black in [-1,1] range).
                    return torch.where(mask, img, -1 + torch.zeros_like(img))
                pred = masked(pred)
                tgt = masked(tgt)
                # Rescale from [-1, 1] to [0, 1] before image-quality metrics.
                pred = (pred+1)/2.0
                tgt = (tgt+1)/2.0
                for metric_name,metric in self.metrics.items():
                    if "loss" not in metric_name:
                        metric.update(pred,tgt)

            # if batch_idx % 100 == 0:
            #     print("----%s batch:%d-----"%(test_type,batch_idx))
            #     for metric_name,metric in self.metrics.items():
            #         print("%s:%f"%(metric_name,metric.measure()))
        print("--------%s time:%d sec---------"%(test_type, time.time()-t0))
        print("loss/psnr/ssim/lpips: %f/%f/%f/%f"%(self.metrics["loss"].measure(),self.metrics["psnr"].measure(),self.metrics["ssim"].measure(),self.metrics["lpips"].measure()))
        # for metric_name,metric in self.metrics.items():
        #     print("%s:%f"%(metric_name,metric.measure()))

        return self.metrics["loss"].measure(), [self.metrics["psnr"].measure(),self.metrics["ssim"].measure(),self.metrics["lpips"].measure()]

    def test_cam_combination(self, dataloader, step=0):
        """Exhaustively evaluate every camera combination of size step+1.

        NOTE(review): this dataloader is unpacked as (imgs, tgt, keep_cams)
        tuples, unlike the dict batches used in train()/test() — confirm the
        intended dataset for this path.

        Args:
            dataloader: yields (imgs, tgt, keep_cams) batches.
            step: number of extra cameras; combinations have step+1 members.

        Returns:
            Tuple (per-combination mean loss, dataset-level precision * 100,
            instance-level oracle precision * 100, summary string).
        """
        self.model.eval()
        t0 = time.time()
        # One-hot rows; summing a combination of rows gives a camera mask.
        candidates = np.eye(dataloader.dataset.num_cam)
        combinations = np.array(list(itertools.combinations(candidates, step + 1))).sum(1)
        K, N = combinations.shape
        loss_s, pred_s, gt_s = [], [], []
        for batch_idx, (imgs, tgt, keep_cams) in enumerate(dataloader):
            B, N = imgs.shape[:2]
            gt_s.append(tgt)
            tgt = tgt.unsqueeze(0).repeat([K, 1])
            # K, B, N
            with torch.no_grad():
                output, _ = self.model.forward_combination(imgs.cuda(), None, self.args.down, combinations, keep_cams)
            loss = F.cross_entropy(output.flatten(0, 1), tgt.flatten(0, 1).cuda(), reduction="none")
            pred = torch.argmax(output, -1)
            loss_s.append(loss.unflatten(0, [K, B]).cpu())
            pred_s.append(pred.cpu())
        loss_s, pred_s, gt_s = torch.cat(loss_s, 1), torch.cat(pred_s, 1), torch.cat(gt_s)
        # K, num_frames
        tp_s = (pred_s == gt_s[None, :]).float()
        # instance level selection
        instance_lvl_strategy = find_instance_lvl_strategy(tp_s, combinations)
        instance_lvl_oracle = np.take_along_axis(tp_s, instance_lvl_strategy, axis=0).mean(1).numpy()[:, None]
        # dataset level selection
        # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the documented replacement.
        keep_cam_idx = combinations[:, keep_cams[0].bool().numpy()].sum(1).astype(bool)
        dataset_lvl_prec = tp_s.mean(1).numpy()[:, None]
        dataset_lvl_strategy = find_dataset_lvl_strategy(dataset_lvl_prec, combinations)
        dataset_lvl_best_prec = dataset_lvl_prec[dataset_lvl_strategy]
        oracle_info = f'{step} steps, averave acc {dataset_lvl_prec[keep_cam_idx].mean():.1%}, ' \
                      f'dataset lvl best {dataset_lvl_best_prec.mean():.1%}, ' \
                      f'instance lvl oracle {instance_lvl_oracle.mean():.1%}, time: {time.time() - t0:.1f}s'
        print(oracle_info)
        return loss_s.mean(1).numpy(), dataset_lvl_prec * 100.0, instance_lvl_oracle * 100.0, oracle_info
