from __future__ import absolute_import

import os

import cv2
# from scipy.misc import imresize
import matplotlib
import numpy as np
from PIL import Image

matplotlib.use('Agg')
import matplotlib.pyplot as plt
from io import BytesIO
from matplotlib import cm
from multiprocessing import Pool

from . import to_numpy
from ..evaluation_metrics.metrics import get_str_list


def recognition_vis(images, preds, targets, encoder_feature, step_points, dataset, vis_dir):
    """Visualize recognition results: draw the per-step points onto each input
    image and onto the first three encoder-feature channels, then save both
    as .jpg files under vis_dir.

    Args:
      images: tensor of shape (B, C, H, W), normalized to [-1, 1].
      preds, targets: predicted / ground-truth sequences, decoded to strings
        via get_str_list.
      encoder_feature: feature tensor; the first three channels are rendered
        as an RGB image per sample.
      step_points: per-sample (x, y) coordinates in feature-map space —
        presumably one point per decoding step (TODO confirm with caller).
      dataset: dataset object forwarded to get_str_list for decoding.
      vis_dir: output directory for the visualizations.
    """
    # (B, C, H, W) -> (B, H, W, C); undo the [-1, 1] normalization.
    images = images.permute(0, 2, 3, 1)
    images = to_numpy(images)
    images = (images * 0.5 + 0.5) * 255
    pred_list, targ_list = get_str_list(preds, targets, dataset)
    # Keep only the first three channels and lay them out as (B, H, W, 3).
    encoder_feature = encoder_feature.permute(1, 0, 2, 3)[:3].permute(1, 2, 3, 0)
    encoder_feature = to_numpy(encoder_feature)
    encoder_feature = (encoder_feature * 0.5 + 0.5) * 255
    step_points = to_numpy(step_points)
    for i, (image, pred, target, f, p) in enumerate(zip(images, pred_list, targ_list, encoder_feature, step_points)):
        image = np.uint8(image)
        f = np.uint8(f)
        flag = 'right' if pred.lower() == target.lower() else 'error'
        file_name = '{:}_{:}_{:}_{:}.jpg'.format(flag, i, pred, target)
        # BUG FIX: the original used file_name.strip(".jpg"), but str.strip
        # removes ANY of the characters '.', 'j', 'p', 'g' from both ends,
        # mangling names whose pred/target ends in those letters (e.g. "gap").
        # splitext drops exactly the extension.
        base_name = os.path.splitext(file_name)[0]
        # Scale factors from feature-map coordinates to image coordinates.
        h_r = image.shape[0] / f.shape[0]
        w_r = image.shape[1] / f.shape[1]
        print(f"pred:{pred}, gt:{target}, ", end="")
        for j in range(p.shape[0]):
            print("({:.3},{:.3}),".format(p[j][0], p[j][1]), end="")
            x, y = int(p[j][0] * w_r), int(p[j][1] * h_r)
            image = cv2.circle(image.copy(), (x, y), 4, (0, 0, 255), thickness=1)
            # Mark the raw (unscaled) point on the feature image in red (BGR).
            f[int(p[j][1]), int(p[j][0]), :] = [0, 0, 255]
            if j > 0:
                # Arrow from the previous step point to the current one.
                start = int(p[j - 1][0] * w_r), int(p[j - 1][1] * h_r)
                end = int(p[j][0] * w_r), int(p[j][1] * h_r)
                cv2.arrowedLine(image, start, end, (0, 0, 255), thickness=1)
        cv2.imwrite(os.path.join(vis_dir, base_name + "_points.jpg"), image)
        cv2.imwrite(os.path.join(vis_dir, base_name + "_feature.jpg"), f)
        print()


# Subprocess worker: save one rendered image array to disk.
def _save_plot_pool(vis_image, save_file_path):
    """Pool worker: convert one rendered array to a PIL image and write it out."""
    Image.fromarray(np.uint8(vis_image)).save(save_file_path)


def stn_vis(raw_images, rectified_images, ctrl_points, preds, targets, dataset, vis_dir):
    """Visualize STN rectification: for each sample, plot the raw image with
    its predicted control points above the rectified image.

    Args:
      raw_images: (B, C, H, W) tensor of un-rectified images in [-1, 1];
        a single (C, H, W) image is auto-batched.
      rectified_images: STN-rectified images, same layout as raw_images.
      ctrl_points: predicted control points with coordinates in [0, 1].
      preds, targets: predicted / ground-truth label sequences.
      dataset: forwarded to get_str_list for decoding.
      vis_dir: output directory; if None, the rendered arrays are returned
        instead of being written to disk.

    Returns:
      list of uint8 (H, W, C) arrays when vis_dir is None, otherwise None.
    """
    if raw_images.ndimension() == 3:
        raw_images = raw_images.unsqueeze(0)
        rectified_images = rectified_images.unsqueeze(0)
    batch_size, _, raw_height, raw_width = raw_images.size()

    # Translate normalized control-point coordinates to pixel coordinates.
    # Copy first: to_numpy may return a view sharing memory with the input
    # tensor, and the in-place scaling below must not clobber the caller's
    # data (NOTE(review): confirm to_numpy's aliasing semantics).
    ctrl_points = np.array(to_numpy(ctrl_points))
    ctrl_points[:, :, 0] = ctrl_points[:, :, 0] * (raw_width - 1)
    ctrl_points[:, :, 1] = ctrl_points[:, :, 1] * (raw_height - 1)
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    ctrl_points = ctrl_points.astype(int)

    # Tensors (B, C, H, W) in [-1, 1] -> uint8 arrays (B, H, W, C).
    raw_images = raw_images.permute(0, 2, 3, 1)
    raw_images = to_numpy(raw_images)
    raw_images = ((raw_images * 0.5 + 0.5) * 255).astype(np.uint8)
    rectified_images = rectified_images.permute(0, 2, 3, 1)
    rectified_images = to_numpy(rectified_images)
    rectified_images = ((rectified_images * 0.5 + 0.5) * 255).astype(np.uint8)

    # Render each (raw + ctrl points, rectified) pair on a matplotlib canvas,
    # then grab the canvas back as a numpy array via an in-memory PNG.
    vis_images = []
    num_sub_plot = 2
    for i in range(batch_size):
        fig = plt.figure()
        ax = [fig.add_subplot(num_sub_plot, 1, j + 1) for j in range(num_sub_plot)]
        for a in ax:
            a.set_xticklabels([])
            a.set_yticklabels([])
            a.axis('off')
        ax[0].imshow(raw_images[i])
        ax[0].scatter(ctrl_points[i, :, 0], ctrl_points[i, :, 1], marker='+', s=5)
        ax[1].imshow(rectified_images[i])
        # NOTE: the original called plt.show() here; it is a no-op under the
        # Agg backend selected at import time, so it has been removed.
        buffer_ = BytesIO()
        plt.savefig(buffer_, format='png', bbox_inches='tight', pad_inches=0)
        plt.close()
        buffer_.seek(0)
        dataPIL = Image.open(buffer_)
        data = np.asarray(dataPIL).astype(np.uint8)
        buffer_.close()

        vis_images.append(data)

    # Save to disk (or hand the arrays back to the caller).
    if vis_dir is None:
        return vis_images
    else:
        pred_list, targ_list = get_str_list(preds, targets, dataset)
        file_path_list = []
        # 'idx' instead of 'id' to avoid shadowing the builtin.
        for idx, (_image, pred, target) in enumerate(zip(vis_images, pred_list, targ_list)):
            flag = 'right' if pred.lower() == target.lower() else 'error'
            file_name = '{:}_{:}_{:}_{:}.png'.format(flag, idx, pred, target)
            file_path_list.append(os.path.join(vis_dir, file_name))

        # PNG encoding + disk writes fan out across a process pool.
        with Pool(os.cpu_count()) as pool:
            pool.starmap(_save_plot_pool, zip(vis_images, file_path_list))



def attention_vis(rectified_images, attention_map, preds, targets, dataset, vis_dir):
    """Build per-step attention heat-map overlays for each rectified image.

    NOTE(review): the rendered overlays are currently discarded — the save
    call in the loop body is commented out, so this function has no
    observable effect. preds/targets/dataset/vis_dir are accepted but unused.
    """
    # (B, C, H, W) in [-1, 1] -> uint8 (B, H, W, C).
    rectified_images = rectified_images.permute(0, 2, 3, 1)
    rectified_images = ((to_numpy(rectified_images) * 0.5 + 0.5) * 255).astype(np.uint8)
    attention_map = to_numpy(attention_map.unsqueeze(2))
    batch = attention_map.shape[0]
    for sample_idx in range(batch):
        base_img = Image.fromarray(rectified_images[sample_idx])
        for step_weights in attention_map[sample_idx]:
            # The *50 factor exaggerates small attention weights before the
            # rainbow colormap is applied (cm.rainbow clips its input).
            overlay = Image.fromarray(np.uint8(cm.rainbow(step_weights * 50) * 255))
            overlay = overlay.convert('RGB')
            overlay = overlay.resize(base_img.size, resample=Image.BICUBIC)
            Image.blend(base_img, overlay, alpha=0.5)
            # heat_map.save(f'./atten_heatmaps_1st_layer_aug_2/{step}.png')


def vis_2Dattention(self, image, atten_weights):
    """Visualize 2D attention: save one colormapped overlay per decoding step.

    Args:
      self: unused — NOTE(review): this looks like it was lifted from a class
        method; kept so existing callers are not broken.
      image: PIL image, h*w*c.
      atten_weights: per-step attention maps, step*h*w*1.

    Side effects: writes ./atten_heatmaps_1st_layer_aug_2/<step>.png for each
    step (the directory is assumed to exist — TODO confirm).
    """
    for step, weights in enumerate(atten_weights):
        # *50 exaggerates small weights before colormapping (clipped to [0,1]).
        overlay = Image.fromarray(np.uint8(cm.rainbow(weights * 50) * 255))
        overlay = overlay.convert('RGBA')
        overlay = overlay.resize(image.size, resample=Image.BICUBIC)
        blended = Image.blend(image, overlay, alpha=0.5)
        blended.save(f'./atten_heatmaps_1st_layer_aug_2/{step}.png')

