import numpy as np
import torchvision
import time
import os
import copy
import pdb
import time
import argparse
import torch.nn.functional as F

import sys
import cv2
import pandas as pd
import skimage
import albumentations as albu
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, models, transforms
from retinanet import model
from heatmap_models.model import model_cpn50, model_resnet_unet, model_resnet_unet_GN, model_hrnet, \
    model_Unet_efficient_b3
from retinanet.dataloader import CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, \
    UnNormalizer, Normalizer

# This script targets the torch 1.x API; fail fast on other major versions.
assert torch.__version__.split('.')[0] == '1'

print('CUDA available: {}'.format(torch.cuda.is_available()))

# NMS / model IoU threshold passed to the detector factory below.
iou_thresh = 0.3
# Minimum confidence for a detection to be considered.
# NOTE(review): currently not applied anywhere visible in this file — confirm intended use.
score_thresh = 0.01


def transform_image(image, min_side=650, max_side=900):
    """Normalize, resize and pad an image for the detection network.

    Args:
        image: H x W x 3 array (uint8 or float, values in [0, 255]).
        min_side: target length for the smaller image side after scaling.
        max_side: upper bound for the larger side after scaling.

    Returns:
        A tuple ``(tensor, (rows, cols))`` where ``tensor`` is a
        1 x 3 x H' x W' float tensor ready for the network and
        ``(rows, cols)`` is the image size *before* padding, needed to map
        predicted coordinates back to the original image.
    """
    image = image.astype(np.float32)
    image = image / 255.
    # Normalize with the standard ImageNet channel mean/std.
    mean = np.array([[[0.485, 0.456, 0.406]]])
    std = np.array([[[0.229, 0.224, 0.225]]])
    image = (image.astype(np.float32) - mean) / std
    # Scale so the smallest side becomes min_side, but cap the scale so the
    # largest side never exceeds max_side.
    rows, cols, cns = image.shape
    smallest_side = min(rows, cols)
    scale = min_side / smallest_side
    largest_side = max(rows, cols)
    if largest_side * scale > max_side:
        scale = max_side / largest_side

    # Resize the image with the computed scale.
    image = skimage.transform.resize(image, (int(round(rows * scale)), int(round((cols * scale)))))
    rows, cols, cns = image.shape
    # Pad so both sides are multiples of 128 (the network downsamples the
    # input several times, so the sides must be divisible by the total stride).
    # BUG FIX: the original `128 - rows % 128` padded an extra full 128 pixels
    # when the side was already aligned; `-rows % 128` is 0 in that case.
    pad_w = -rows % 128
    pad_h = -cols % 128
    new_image = np.zeros((rows + pad_w, cols + pad_h, cns)).astype(np.float32)
    new_image[:rows, :cols, :] = image.astype(np.float32)
    new_image = np.transpose(new_image, (2, 0, 1))
    # Return the pre-padding size so predicted keypoint/box coordinates can be
    # converted back to the original image resolution.
    return torch.from_numpy(new_image[np.newaxis, ...]), (rows, cols)


def main(args=None):
    """Run the trained detector over every image in a directory, draw the
    predicted boxes, and optionally show/save the annotated images.

    Args:
        args: optional list of command-line arguments (defaults to sys.argv).
    """
    parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--if_show', help='if show the pred images', default=False)
    parser.add_argument('--show_image_path', help='the path to save show image', default='../checkpoints/show/cewei')
    parser.add_argument('--result_csv', help='the dir of csv file to save pred result', default='/checkpoints')
    parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.', default='csv')
    parser.add_argument('--coco_path', help='Path to COCO directory')
    parser.add_argument('--test_data_path', help='Path of data to test',
                        default='/data')
    parser.add_argument('--model_path', help='Path to model', type=str,
                        default='./model_save/cewei_dete.pth')

    parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152',
                        default='new_efficientdet_b3')

    parser = parser.parse_args(args)

    # Create the model.
    # BUG FIX: argparse delivers CLI values as strings, so the original
    # comparisons against ints (e.g. `parser.depth == 18`) could never match
    # a value passed on the command line. Normalize to str once and compare.
    depth = str(parser.depth)
    if depth == '18':
        retinanet = model.resnet18(num_classes=2, pretrained=True)
    elif depth == '34':
        retinanet = model.resnet34(num_classes=2, pretrained=True)
    elif depth == '50':
        retinanet = model.resnet50(num_classes=2, pretrained=True)
    elif depth == '101':
        retinanet = model.resnet101(num_classes=2, pretrained=True)
    elif depth == '152':
        retinanet = model.resnet152(num_classes=2, pretrained=True)
    elif depth == 'efficientnet-b0':
        retinanet = model.resnet18(pretrained=False, num_classes=2,
                                   effici_name='efficientnet-b0')
    elif depth == 'new_efficientdet_b3':
        retinanet = model.new_efficientdet_b3(pretrained=False, num_classes=2, thresh=iou_thresh)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    os.makedirs(parser.show_image_path, exist_ok=True)
    retinanet = torch.nn.DataParallel(retinanet).cuda()
    # Load trained model weights. The checkpoint keys are expected to already
    # carry the 'module.' prefix produced by DataParallel.
    trained_model_state_dict = torch.load(parser.model_path)
    retinanet.load_state_dict(trained_model_state_dict)

    use_gpu = torch.cuda.is_available()
    if use_gpu:
        retinanet = retinanet.cuda()

    retinanet.eval()

    # BUG FIX: --if_show arrives as a string from the CLI, so the original
    # `parser.if_show is True` identity test was always False for CLI use.
    if_show = parser.if_show in (True, 'True', 'true', '1')

    dir_list = os.listdir(parser.test_data_path)

    for name in dir_list:
        with torch.no_grad():
            # Read the image twice: one copy is normalized for the network,
            # the other is kept raw for drawing the predictions.
            image_dir = os.path.join(parser.test_data_path, name)
            try:
                image = skimage.io.imread(image_dir)
                image = skimage.color.gray2rgb(image)
                show_image = skimage.io.imread(image_dir)
                show_image = skimage.color.gray2rgb(show_image)
            except FileNotFoundError:
                continue

            original_image_shape = image.shape[:-1]
            # Convert to the detector's input; also keep the pre-pad shape so
            # predicted coordinates can be mapped back to the original image.
            detec_image, detec_image_shape = transform_image(image)
            scores, classification, transformed_anchors = retinanet(detec_image.cuda().float())
            # BUG FIX: removed the stray `scores = np.where()` call, which
            # raised a TypeError on every image.

            # Collect predicted boxes [x1, y1, x2, y2] and sort them
            # top-to-bottom by y1.
            box_list = [transformed_anchors[i, :] for i in range(len(scores))]
            box_list = sorted(box_list, key=lambda j: j[1])

            # Append only the first box classified as S1 (class id 1), if any.
            # BUG FIX: the original indexed `[0][0]` unconditionally and
            # crashed with IndexError when no S1 box was predicted.
            s1_indices = np.where(classification.cpu() == 1)[0]
            if len(s1_indices) > 0:
                box_list.append(transformed_anchors[s1_indices[0], :])

            # Draw each box, converting coordinates from the padded network
            # input back to the original image resolution.
            for j, bbox in enumerate(box_list):
                x1 = int(int(bbox[0]) / detec_image_shape[1] * original_image_shape[1])
                y1 = int(int(bbox[1]) / detec_image_shape[0] * original_image_shape[0])
                x2 = int(int(bbox[2]) / detec_image_shape[1] * original_image_shape[1])
                y2 = int(int(bbox[3]) / detec_image_shape[0] * original_image_shape[0])
                cv2.rectangle(show_image, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=3)
                # The 7th box (index 6) is labeled as S1 on the rendered image.
                # NOTE(review): this relies on the appended S1 box always being
                # at index 6 — confirm against the expected detection count.
                if j == 6:
                    cv2.putText(show_image, 'S1', (x1, y1 - 10), cv2.FONT_HERSHEY_PLAIN, 4, (0, 0, 255), 4)

            if if_show:
                # Resize to a fixed size purely for on-screen display.
                show_image = cv2.resize(show_image, (448, 896))
                cv2.imshow('img', show_image)
                cv2.waitKey(0)
            if parser.show_image_path is not None:
                cv2.imwrite(image_dir.replace(parser.test_data_path, parser.show_image_path), show_image)


# Script entry point: parse CLI arguments and run inference.
if __name__ == '__main__':
    main()