"""可视化参数模块"""

import enum
import os
from sys import argv
from numpy.lib.type_check import imag
import torch
import matplotlib.pyplot as plt
import argparse
from random import choices
from taa_core.config import cfg
from taa_core.utils.checkpoint import DetectronCheckpointer
from visualer.network import GeneralizedRCNN
from PIL import Image, ImageDraw
import torchvision.transforms as T
import torchvision.transforms.functional as F
from torchvision import utils
from taa_core.data.transforms import Normalize
from taa_core.data.transforms import Resize
from taa_core.data.build import make_data_loader
import numpy as np
from tqdm import tqdm
import cv2

torch.set_printoptions(precision=3, sci_mode=False, threshold=65536)


_COLORS = ["red", "blue", "yellow", "orange", "purple", "green"]


def randomColors(k):
    colors = _COLORS[:k]
    return colors


def visual_core_edge_feats(dots):
    """Scatter-plot per-box scores against their IoU values.

    Parameters
    ----------
    dots : tuple
        ``(cls, iou, color)`` — classification scores (y axis), IoU values
        (x axis), and a matplotlib color spec shared by all points.

    Shows an interactive figure, blocks on ``input()`` so the window stays
    open until the user presses enter, then closes all figures.
    """
    plt.ion()
    fig = plt.figure(1, figsize=(20, 16))
    # Fix: FigureCanvas.set_window_title was deprecated in Matplotlib 3.4
    # and removed in 3.6 — the figure manager is the supported accessor.
    fig.canvas.manager.set_window_title("fdosd")
    plt.xlabel("IoU", fontsize=18)
    plt.ylabel("score w/ IoU-ness", fontsize=16)
    plt.xlim(0, 1)
    plt.ylim(0, 1)

    cls, iou, color = dots
    plt.scatter(y=cls, x=iou, c=color, s=15)

    plt.show()
    input("enter to continue")
    plt.close('all')

def show_heatmap(features, name, h, w, s):
    """Render every channel of a feature map as a JET heat-map mosaic.

    Parameters
    ----------
    features : torch.Tensor
        Batched feature map; only ``features[0]`` is used, iterated
        channel-by-channel.  Each channel is squashed with sigmoid and
        scaled to [0, 255] before colorizing.
    name, s
        Combined into the OpenCV window title ``"{name}-{s}"``.
    h, w : int
        Cell height/width each channel is resized to (1-px black separator
        is added on the right/bottom of every cell).

    Blocks in ``cv2.waitKey`` until a key is pressed.
    """
    feats = features[0].cpu().sigmoid().numpy()
    n = len(feats)
    # Fix: the original hard-coded a 16x16 grid (exactly 256 channels) and
    # raised IndexError for anything else.  Derive a square-ish grid from
    # the actual channel count; output is identical for 256 channels.
    grid = max(1, int(np.ceil(np.sqrt(n))))
    cell_h, cell_w = h + 1, w + 1
    canvas = np.zeros((cell_h * grid, cell_w * grid, 3), dtype=np.uint8)

    for idx, feat in enumerate(feats):
        feat = (feat * 255).astype(np.uint8)
        feat = cv2.resize(feat, (w, h))
        colored = cv2.applyColorMap(feat, cv2.COLORMAP_JET)
        i, j = divmod(idx, grid)
        canvas[i * cell_h:i * cell_h + h, j * cell_w:j * cell_w + w, :] = colored

    cv2.imshow(F"{name}-{s}", canvas)
    cv2.waitKey(-1)


def visual(imgdir="ori_images"):
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference")
    parser.add_argument(
        "--config-file",
        default="configs/mla/mla_R_50_FPN_1x.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    # parser.add_argument("--image", type=str)
    # parser.add_argument("--bbox", type=str)
    # parser.add_argument("--cat", type=int)
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    if not os.path.exists(imgdir):
        os.makedirs(imgdir)

    network = GeneralizedRCNN(cfg)
    network.to(cfg.MODEL.DEVICE)
    output_dir = cfg.OUTPUT_DIR
    checkpointer = DetectronCheckpointer(cfg, network, save_dir=output_dir)
    _ = checkpointer.load(cfg.MODEL.WEIGHT)

    data_loader = make_data_loader(cfg, is_train=False, is_distributed=False)[0]

    fp = open("anchors.txt", mode="a+")
    anchors = 1
    red = (255,0,0)
    green = (84, 255, 81)
    with torch.no_grad():
        for batch in tqdm(data_loader):
            
            images, targets, image_ids = batch
            
            # 242
            if image_ids[0] not in [2709, ]:
                continue
          
            targets = [target.to(cfg.MODEL.DEVICE) for target in targets]

        
            pos_boxes, neg_boxes = network(images.to(cfg.MODEL.DEVICE), targets)
            if pos_boxes is None or neg_boxes is None:
                continue
            if len(pos_boxes) == 0 or len(neg_boxes) == 0:
                continue

            for i, target in enumerate(targets):
                w, h = target.size
                pos_boxes[i][:, 0][pos_boxes[i][:, 0] < 0] = 0
                pos_boxes[i][:, 1][pos_boxes[i][:, 1] < 0] = 0
                pos_boxes[i][:, 2][pos_boxes[i][:, 2] > w] = w
                pos_boxes[i][:, 3][pos_boxes[i][:, 3] > h] = h

            # 预测框
            for idx, imid in enumerate(image_ids):
                fp.write(f"pred in {suffix} of {imid}: \n" )
                fp.write(f"image size: {targets[idx].size[0]}, {targets[idx].size[1]} \n")
                fp.write("pos: \n")
                if len(pos_boxes[idx]) > 0:
                    for pos in pos_boxes[idx]:
                        if len(pos) > 0:
                            fp.write(f"[{pos[0]}, {pos[1]}, {pos[2]}, {pos[3]}], \n")
                fp.write("neg: \n")
                if len(neg_boxes[idx]) > 0:
                    for neg in neg_boxes[idx]:
                        if len(neg) > 0:
                            fp.write(f"[{neg[0]}, {neg[1]}, {neg[2]}, {neg[3]}], \n")
            continue
            
            anchors = 0

            # 绘图
            ori_images = []
            for img, img_id in zip(images.tensors, image_ids):
                img = imageSavePIL(img, normalization=True)
                ori_images.append(img)
            
            line_width = 2
            for i in range(len(ori_images)):
                img = ori_images[i]
                img_id = image_ids[i]
                filename = f"{imgdir}/{fprefix}{img_id}.png"
                aimg = ImageDraw.ImageDraw(img)
                if len(neg_boxes[i]) > 0:
                    for neg in neg_boxes[i]:
                        if len(neg) > 0:
                            aimg.rectangle(((neg[0], neg[1]), (neg[2], neg[3])), fill=None, outline=green, width=line_width)
                if len(pos_boxes[i]) > 0:
                    for pos in pos_boxes[i]:
                        if len(pos) > 0:
                            aimg.rectangle(((pos[0], pos[1]), (pos[2], pos[3])), fill=None, outline=red, width=line_width)
                img.save(filename)
    if anchors == 1:
        fp.write("\n \n")
        fp.flush()
        fp.close()


def imageSavePIL(image: torch.Tensor, normalization=True,
    mean=[102.9801, 115.9465, 122.7717], std=[1., 1., 1.], to_BGR=True,
):
    """Convert a CHW image tensor back to a PIL image.

    Parameters
    ----------
    image : torch.Tensor
        Channel-first image tensor (normalized pixel values when
        ``normalization`` is True).
    normalization : bool
        If True, undo dataset normalization: ``image * std + mean``.
    mean, std : sequence of float
        Per-channel de-normalization constants (defaults match the
        detectron BGR pixel means used elsewhere in this project).
    to_BGR : bool
        If True, swap the first and third channels (RGB <-> BGR).

    Returns
    -------
    PIL.Image.Image

    Fix: the original mutated the caller's tensor via in-place ``mul_`` /
    ``add_`` / ``/=`` — we now work on a detached copy.
    """
    image = image.detach().clone()
    if normalization:
        mean_t = torch.as_tensor(mean, dtype=image.dtype, device=image.device).view(-1, 1, 1)
        std_t = torch.as_tensor(std, dtype=image.dtype, device=image.device).view(-1, 1, 1)
        image.mul_(std_t).add_(mean_t)  # safe: operates on our private copy
    if to_BGR:
        image = image[[2, 1, 0]]
    # ToPILImage expects float tensors in [0, 1].
    image = image / 255
    return T.ToPILImage()(image)

if __name__ == '__main__':
    import sys
    # Undocumented CLI contract: argv[7] names the output sub-directory /
    # log tag, optional argv[8] prefixes saved image filenames.
    # NOTE(review): hard-coded positions assume exactly 8 or 9 argv entries —
    # fragile; verify against the launch script before changing.
    suffix = sys.argv[7]
    fprefix = sys.argv[8]+"_" if len(sys.argv) == 9 else ""
    # Strip the trailing custom args so argparse inside visual() doesn't choke.
    sys.argv = sys.argv[:-2] if len(sys.argv) == 9 else sys.argv[:-1]
    visual(imgdir=f"locationQs/{suffix}")

