import math
import os
import sys
from typing import Iterable
import numpy as np
import copy
import itertools

import torch

import util.draw_pic as drawpic
import util.misc as utils
from datasets.hico_eval import HICOEvaluator
from datasets.vcoco_eval import VCOCOEvaluator
#from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import torchvision.transforms.functional as F


def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, max_norm: float = 0,
                    amp=False, lr_decay=False, lr_scheduler=None,
                    ema=None):
    """Train ``model`` for one epoch and return averaged metrics.

    Args:
        model: the HOI model; its forward takes ``(samples, targets)``.
        criterion: loss module exposing a ``weight_dict`` of per-loss weights.
        data_loader: yields ``(samples, targets)`` batches.
        optimizer: optimizer with (at least) two param groups; group 1 is
            treated as the backbone group when ``lr_decay`` is set.
        device: device to move batches to.
        epoch: current epoch index (used for logging and lr lookup).
        max_norm: gradient-clipping threshold; 0 disables clipping.
        amp: enable mixed-precision training via ``torch.cuda.amp``.
        lr_decay: if True, set the lr of both param groups from
            ``lr_scheduler[epoch]`` (pix2seq-style linear decay schedule).
        lr_scheduler: list of per-epoch learning rates; defaults to ``[0]``
            (previously a mutable default argument — fixed to a None sentinel).
        ema: optional EMA helper; ``ema.update()`` is called after each step.

    Returns:
        dict mapping metric name -> global average over the epoch.
    """
    if lr_scheduler is None:
        lr_scheduler = [0]

    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    # pix2seq-style linear lr decay: backbone group (index 1) runs at 10%
    # of the base lr.
    if lr_decay:
        optimizer.param_groups[0]['lr'] = lr_scheduler[epoch]
        optimizer.param_groups[1]['lr'] = lr_scheduler[epoch] * 0.1

    scaler = torch.cuda.amp.GradScaler() if amp else None
    for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
        samples = samples.to(device)
        # Keep only tensor-valued target entries; string/id fields stay on CPU.
        targets = [{k: v.to(device) for k, v in t.items()
                    if k not in ('filename', 'id', 'img_id')} for t in targets]

        # Forward + loss under autocast when amp is on; a no-op context otherwise.
        # NOTE(review): the old amp branch called model(samples) without targets,
        # unlike the fp32 branch and evaluate_hoi — unified here; confirm the
        # model's forward signature.
        with torch.cuda.amp.autocast(enabled=amp):
            outputs = model(samples, targets)
            loss_dict = criterion(outputs, targets)
            weight_dict = criterion.weight_dict
            losses = sum(loss_dict[k] * weight_dict[k]
                         for k in loss_dict.keys() if k in weight_dict)

        # Reduce losses over all GPUs for logging purposes only.
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v for k, v in loss_dict_reduced.items()}
        loss_dict_reduced_scaled = {k: v * weight_dict[k]
                                    for k, v in loss_dict_reduced.items() if k in weight_dict}
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
        loss_value = losses_reduced_scaled.item()

        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        if amp:
            scaler.scale(losses).backward()
            # Gradients must be unscaled before clipping/measuring, otherwise
            # the norm is computed on GradScaler-scaled gradients.
            scaler.unscale_(optimizer)
        else:
            losses.backward()

        if max_norm > 0:
            grad_total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        else:
            grad_total_norm = utils.get_total_grad_norm(model.parameters(), max_norm)

        if amp:
            scaler.step(optimizer)
            scaler.update()
        else:
            optimizer.step()

        if ema is not None:
            ema.update()

        metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        metric_logger.update(grad_norm=grad_total_norm)

    # Gather the stats from all processes.
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}



@torch.no_grad()
def evaluate_hoi(dataset_file, model, postprocessors, data_loader, subject_category_id, device, args, epoch, tb_writer=None, ema = None):
    """Evaluate a HOI model over ``data_loader`` and return a stats dict.

    Optionally applies EMA shadow weights for the duration of the pass
    (restored before returning) and, when ``tb_writer`` and ``args.painting``
    are set, registers forward hooks to capture decoder cross-attention maps
    and renders them via ``drawpic.tensorboard_plt``.

    Args:
        dataset_file: 'hico' or 'vcoco' — selects the evaluator.
        model: HOI model (DDP-wrapped on GPU; see ``hook_list_process``).
        postprocessors: dict; the 'hoi' entry maps raw outputs + original
            image sizes to per-image detection results.
        data_loader: evaluation loader yielding (samples, targets).
        subject_category_id: unused in this function body — kept for
            interface compatibility with the caller.
        device: device the image batches are moved to.
        args: run configuration; ``args.painting`` and ``args.eval_extra``
            are read here.
        epoch: current epoch index, forwarded to plots/evaluator.
        tb_writer: optional TensorBoard writer for attention visualization.
        ema: optional EMA helper (``apply_shadow``/``restore``).

    Returns:
        dict of evaluation statistics (HICO: default + known-object mAP;
        V-COCO: role AP).
    """
    model.eval()
    if ema is not None: 
        ema.apply_shadow()
        
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    preds = []
    gts = []
    indices = []
    results_list = []
    targets_list = []
    # Running image offset into the epoch, consumed by the plotting helper.
    per_batch = 0
    
    hooks = []
    if tb_writer is not None and args.painting:  
        hooks, conv_features, enc_attn_weights, obj_dec_attn_weights, interaction_dec_attn_weights, human_dec_attn_weights = hook_list_process(device, model)

    for samples, targets in metric_logger.log_every(data_loader, 10, header):  # iterate over the loader batch by batch
        samples = samples.to(device)
        outputs = model(samples, targets)
        # Original (pre-resize) image sizes, e.g. (483, 500), needed to map
        # predictions back to input-image coordinates.
        orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)

        results = postprocessors['hoi'](outputs, orig_target_sizes)
        # if 'segm' in postprocessors.keys():
        #     results = {}
        #     target_sizes = torch.stack([t["size"] for t in targets], dim=0)
        #     results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
        results_list.extend(results)
        targets_list.extend(targets)
        if tb_writer is not None and args.painting:  
            # Attach the hooked attention maps to the outputs for plotting.
            #   N, 2048, 35, 35  backbone feature map (src)
            outputs['conv_features'] = conv_features
            #   N, 35x35, 35x35  encoder self-attention (memory)
            outputs['enc_attn_weights'] = enc_attn_weights
            #   N, 100, 35x35    decoder cross-attention (after softmax)
            outputs['obj_dec_attn_weights'] = obj_dec_attn_weights
            outputs['human_dec_attn_weights'] = human_dec_attn_weights
            outputs['interaction_dec_attn_weights'] = interaction_dec_attn_weights     
            tb_writer = drawpic.tensorboard_plt(orig_target_sizes, samples, outputs, epoch, tb_writer, args, targets, results, per_batch)
            per_batch += len(targets)
    
    # Gather predictions and ground truths from all distributed processes.
    preds.extend(list(itertools.chain.from_iterable(utils.all_gather(results_list))))
    gts.extend(list(itertools.chain.from_iterable(utils.all_gather(copy.deepcopy(targets_list)))))            
    if tb_writer is not None:
        for hook in hooks:
            hook.remove()
            
    if ema is not None: 
        ema.restore()
        
    metric_logger.synchronize_between_processes()

    # Deduplicate images that were evaluated on more than one process
    # (distributed samplers may pad/duplicate); keep the first occurrence.
    img_ids = [img_gts['id'] for img_gts in gts]
    _, indices = np.unique(img_ids, return_index=True)
    preds = [img_preds for i, img_preds in enumerate(preds) if i in indices]
    gts = [img_gts for i, img_gts in enumerate(gts) if i in indices]

    if dataset_file == 'hico':
        evaluator = HICOEvaluator(preds, gts, epoch, args) 
        stats = evaluator.evaluation_default()
        stats_ko = evaluator.evaluation_ko()
        stats.update(stats_ko)
        if args.eval_extra:
            evaluator.evaluation_extra()
    elif dataset_file == 'vcoco':
        evaluator = VCOCOEvaluator(preds, gts, data_loader.dataset.correct_mat, args)
        stats = evaluator.evaluate()
    #torch.cuda.empty_cache()
    return stats

##############################################################################################################################################

def hook_list_process(device, model):
    """Register forward hooks that capture decoder cross-attention weights.

    Hooks the last object-decoder layer and the last interaction-decoder
    layer; each hook appends the second forward output of the
    ``multihead_attn`` module (the attention-weight tensor) to its list.

    Args:
        device: when exactly the string 'cpu' the model is assumed unwrapped;
            otherwise it is assumed DDP-wrapped and accessed via ``.module``.
        model: the (possibly wrapped) HOI model.

    Returns:
        Tuple of ``(hooks, conv_features, enc_attn_weights,
        obj_dec_attn_weights, interaction_dec_attn_weights,
        human_dec_attn_weights)``. Lists other than the two hooked ones stay
        empty; they are kept so callers can unpack a fixed shape.
    """
    conv_features = []
    enc_attn_weights = []
    obj_dec_attn_weights = []
    interaction_dec_attn_weights = []
    human_dec_attn_weights = []

    def _capture(store):
        # MultiheadAttention's forward returns (output, attn_weights);
        # stash the weights only.
        return lambda module, inputs, outputs: store.append(outputs[1])

    # On CPU the model is used directly; otherwise it lives under .module.
    net = model if device == 'cpu' else model.module
    hooks = [
        net.transformer.decoder.layers[-1].multihead_attn.register_forward_hook(
            _capture(obj_dec_attn_weights)),
        net.transformer.interaction_decoder.layers[-1].multihead_attn.register_forward_hook(
            _capture(interaction_dec_attn_weights)),
    ]
    return hooks, conv_features, enc_attn_weights, obj_dec_attn_weights, interaction_dec_attn_weights, human_dec_attn_weights




def exponential_inc_iterative_loss(loss_dict_reduced, weight_dict, model):
    """Scale each reduced loss by its configured weight.

    Only keys present in ``weight_dict`` are kept. The original body also
    contained a dead no-op loop over ``loss_dict_reduced`` (``for ...: None``),
    removed here; the exponential iterative weighting suggested by the name is
    not implemented.

    Args:
        loss_dict_reduced: mapping of loss name -> loss value (tensor/float).
        weight_dict: mapping of loss name -> scalar weight.
        model: unused; kept for signature compatibility with callers.

    Returns:
        dict mapping each matching loss name to ``value * weight``.
    """
    return {k: v * weight_dict[k]
            for k, v in loss_dict_reduced.items() if k in weight_dict}