from __future__ import print_function, absolute_import

import sys
import time
from datetime import datetime

import numpy as np
import torch
from torch.nn.parallel import DistributedDataParallel

from . import evaluation_metrics
from .utils.distributed_ops import distributed_concat
from .utils.meters import AverageMeter
from .utils.visualization_utils import recognition_vis, stn_vis, attention_vis

# Registry mapping metric names (e.g. "accuracy") to their scoring functions.
metrics_factory = evaluation_metrics.factory()

from config import get_args

# Parse command-line arguments once at import time; shared by all evaluators below.
global_args = get_args(sys.argv[1:])


class BaseEvaluator(object):
    """Base class for running a recognition model over a data loader.

    Subclasses must implement:
      * ``_parse_data`` -- convert a raw data-loader batch into the input
        dict the model expects (keys 'images', 'rec_targets', ...).
      * ``_forward`` -- run the model on that dict and return its output
        dict (keys 'output', 'losses').
    """

    def __init__(self, model, metric, device, print_model_size=False):
        """
        Args:
            model: network to evaluate (may be wrapped in DistributedDataParallel).
            metric: key into ``metrics_factory`` selecting the scoring function.
            device: device the subclass moves inputs onto.
            print_model_size: if True, subclasses may profile FLOPs/params once.
        """
        super(BaseEvaluator, self).__init__()
        self.model = model
        self.metric = metric
        self.device = device
        self.print_model_size = print_model_size

    def evaluate(self, data_loader, step=1, print_freq=1, tfLogger=None, dataset=None, vis_dir=None):
        """Evaluate the model on ``data_loader`` and return the metric result.

        Args:
            data_loader: iterable of batches consumable by ``_parse_data``.
            step: global step used when logging scalars to ``tfLogger``.
            print_freq: print progress every ``print_freq`` batches.
            tfLogger: optional TensorBoard-style logger with ``add_scalar``.
            dataset: dataset object used for decoding/metrics/visualization.
            vis_dir: if set, dump STN visualizations for each batch.

        Returns:
            The metric value, or None when the model produced no 'pred_rec'
            output (nothing to score).
        """
        self.model.eval()

        batch_time = AverageMeter()
        data_time = AverageMeter()

        # Accumulators across all batches.
        images, outputs, targets, losses = [], {}, [], []
        file_names = []

        total_num = 0
        total_time = 0.
        end = time.time()
        for i, inputs in enumerate(data_loader):
            data_time.update(time.time() - end)

            input_dict = self._parse_data(inputs)
            time_start = time.time()
            output_dict = self._forward(input_dict)
            # Synchronize so the measured forward time includes queued GPU work;
            # guard so CPU-only runs don't crash.
            if torch.cuda.is_available():
                torch.cuda.synchronize()
            total_time += time.time() - time_start

            if vis_dir:
                # Alternative visualizations (recognition_vis / attention_vis)
                # are imported above and can be swapped in here.
                stn_vis(input_dict["images"], output_dict["output"]["rectified_images"], output_dict["output"]["ctrl_points"], output_dict["output"]["pred_rec"], input_dict["rec_targets"], dataset, vis_dir)

            batch_size = input_dict['images'].size(0)
            total_num += batch_size

            # Sum every loss component, weighted by batch size so the final
            # average over num_samples is correct.
            total_loss_batch = 0.
            for loss in output_dict['losses'].values():
                loss = loss.mean(dim=0, keepdim=True)
                total_loss_batch += loss.item() * batch_size

            images.append(input_dict['images'])
            targets.append(input_dict['rec_targets'])
            losses.append(total_loss_batch)
            if global_args.evaluate_with_lexicon:
                file_names += input_dict['file_name']
            for k, v in output_dict['output'].items():
                outputs.setdefault(k, []).append(v)

            batch_time.update(time.time() - end)

            if (i + 1) % print_freq == 0:
                print('[{}]\t'
                      'Evaluation: [{}/{}]\t'
                      'Time {:.3f} ({:.3f})\t'
                      'Data {:.3f} ({:.3f})\t'
                      .format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                              i + 1, len(data_loader),
                              batch_time.val, batch_time.avg,
                              data_time.val, data_time.avg))
            end = time.time()

        # Report inference speed; guard against an empty loader / zero time.
        print(f"total num: {total_num}")
        print(f"total time: {total_time} s.")
        if total_num > 0 and total_time > 0:
            print(f"average speed: {total_time / total_num * 1000} ms/image. FPS: {total_num / total_time}")

        if not global_args.keep_ratio:
            # Fixed-size images: concatenate into a single tensor.
            images = torch.cat(images)
            num_samples = images.size(0)
        else:
            # keep_ratio means per-batch image sizes differ; only count samples.
            num_samples = sum(subimages.size(0) for subimages in images)
        targets = torch.cat(targets)
        losses = np.sum(losses) / (1.0 * num_samples)
        for k in outputs:
            outputs[k] = torch.cat(outputs[k])

        # Score the recognition predictions, if the model produced any.
        eval_res = None  # stays None when 'pred_rec' is absent (fixes UnboundLocalError)
        if 'pred_rec' in outputs:
            if isinstance(self.model, DistributedDataParallel):
                # Gather predictions/targets across ranks; inputs must be CUDA tensors.
                preds = distributed_concat(outputs["pred_rec"], len(dataset))
                targets = distributed_concat(targets, len(dataset))
            else:
                preds = outputs["pred_rec"]
            # evaluation with metric
            if global_args.evaluate_with_lexicon:
                eval_res = metrics_factory[self.metric + '_with_lexicon'](preds, targets, dataset, file_names)
                print('lexicon0: {0}, {1:.3f}'.format(self.metric, eval_res[0]))
                print('lexicon50: {0}, {1:.3f}'.format(self.metric, eval_res[1]))
                print('lexicon1k: {0}, {1:.3f}'.format(self.metric, eval_res[2]))
                print('lexiconfull: {0}, {1:.3f}'.format(self.metric, eval_res[3]))
                eval_res = eval_res[0]
            else:
                eval_res = metrics_factory[self.metric](preds, targets, dataset)
                print('lexicon0: {0}: {1:.3f}'.format(self.metric, eval_res))

            if tfLogger is not None:
                # (1) Log the scalar values
                info = {
                    'loss': losses,
                    self.metric: eval_res,
                }
                for tag, value in info.items():
                    tfLogger.add_scalar(tag, value, step)

        # ====== Visualization ======#
        if vis_dir is not None:
            # Whole-run visualization hooks (recognition_vis / stn_vis) can be
            # re-enabled here if needed.
            pass
        return eval_res

    def _parse_data(self, inputs):
        """Convert a raw data-loader batch into the model's input dict."""
        raise NotImplementedError

    def _forward(self, inputs):
        """Run the model on a parsed input dict and return its output dict.

        Signature fixed to match the call site in ``evaluate`` and the
        subclass overrides (the stray ``opt`` parameter was unused).
        """
        raise NotImplementedError


class Evaluator(BaseEvaluator):
    """Evaluator for models whose batches are (images, labels, lengths)."""

    def _parse_data(self, inputs):
        """Move a raw batch onto ``self.device`` and pack it into the input dict.

        Batch layout is (imgs, label_encs, lengths) plus a trailing file_name
        list when evaluating with a lexicon.
        """
        input_dict = {}
        if global_args.evaluate_with_lexicon:
            imgs, label_encs, lengths, file_name = inputs
        else:
            imgs, label_encs, lengths = inputs

        # Fix: initialize so 'rec_targets' is well-defined (None) even when
        # the batch carries no labels; previously this raised UnboundLocalError.
        labels = None
        with torch.no_grad():
            images = imgs.to(self.device)
            if label_encs is not None:
                labels = label_encs.to(self.device)
                lengths = lengths.to(self.device)

        input_dict['images'] = images
        input_dict['rec_targets'] = labels
        input_dict['rec_lengths'] = lengths
        if global_args.evaluate_with_lexicon:
            input_dict['file_name'] = file_name
        return input_dict

    def _forward(self, input_dict):
        """Run the model without gradients; optionally profile size once.

        When ``print_model_size`` is set, FLOPs (per image) and parameter
        count are printed for the first batch only.
        """
        self.model.eval()
        with torch.no_grad():
            output_dict = self.model(input_dict)
            if self.print_model_size:
                # thop is an optional third-party profiler; imported lazily on purpose.
                from thop import profile
                flops, params = profile(self.model, inputs=(input_dict,))
                flops = flops / input_dict['images'].size(0)  # per-image FLOPs
                print('flops:{}'.format(flops))
                print('params:{}'.format(params))
                self.print_model_size = False  # profile only the first batch
        return output_dict


class EvaluatorTrans(BaseEvaluator):
    """Evaluator for transformer-style models whose batches also carry a label mask."""

    def _parse_data(self, inputs):
        """Move a raw batch onto ``self.device`` and pack it into the input dict.

        Batch layout is (imgs, label_encs, lengths, label_mask) plus a trailing
        file_name list when evaluating with a lexicon.
        """
        input_dict = {}
        if global_args.evaluate_with_lexicon:
            imgs, label_encs, lengths, label_mask, file_name = inputs
        else:
            imgs, label_encs, lengths, label_mask = inputs

        # Fix: initialize so 'rec_targets' is well-defined (None) even when
        # the batch carries no labels; previously this raised UnboundLocalError.
        labels = None
        with torch.no_grad():
            images = imgs.to(self.device)
            label_mask = label_mask.to(self.device)
            if label_encs is not None:
                labels = label_encs.to(self.device)
                lengths = lengths.to(self.device)

        input_dict['images'] = images
        input_dict['rec_targets'] = labels
        input_dict['rec_lengths'] = lengths
        input_dict['label_mask'] = label_mask
        if global_args.evaluate_with_lexicon:
            input_dict['file_name'] = file_name
        return input_dict

    def _forward(self, input_dict):
        """Run the model on the parsed input dict with gradients disabled."""
        self.model.eval()
        with torch.no_grad():
            output_dict = self.model(input_dict)
        return output_dict
