import os
import cv2
import math
import time
import torch
import numpy as np
import torch.nn as nn

#from backbone.resnet_dcn import ResNet
from backbone.dlanet_dcn_nl import DlaNet as DlaNet
# from backbone.dlanet import DlaNet as DlaNet

from loss.Loss import _gather_feat,_transpose_and_gather_feat
from PIL import Image, ImageDraw
from datasets_loader.dataloader_hrsc import get_affine_transform

# Path to the pretrained VGG16 model weights
vgg16_npy_pyth = './vgg16.npy'
# Path to the content image
content_img_path = './imgs/imgs_nwpu/test/001.jpg'


def read_img(img_name):
    '''
    Read an image from disk and return it as a 4-D int32 array.

    :param img_name: path to the image file
    :return: array of shape (1, H, W, C), dtype int32
    '''
    raw = Image.open(img_name)
    # Wrap in a one-element list to prepend a batch axis:
    # (H, W, C) -> (1, H, W, C), as expected by the TF placeholder below.
    batch = np.asarray([np.array(raw)], dtype=np.int32)
    return batch



def get_row_col(num_pic):
    '''
    Compute a (rows, cols) subplot-grid layout big enough for num_pic images.

    :param num_pic: number of feature maps to display
    :return: (row, col) with row * col >= num_pic
    '''
    root = num_pic ** 0.5
    row = round(root)
    # When rounding went down, one extra column is needed to fit everything.
    if root > row:
        return row, row + 1
    return row, row

def visualize_feature_map(feature_batch, num_pic=25):
    '''
    Plot the first num_pic channels of a conv layer's output as a subplot grid.

    NOTE(review): relies on a module-level ``plt`` (matplotlib.pyplot) that is
    never imported in this file — add ``import matplotlib.pyplot as plt`` at
    the top before running.

    :param feature_batch: feature maps with shape (1, H, W, C)
    :param num_pic: number of channels to display (default 25, as the
                    original hard-coded value)
    :return: None (shows the figure)
    '''
    # Drop the leading batch axis: (1, H, W, C) -> (H, W, C).
    feature_map = np.squeeze(feature_batch, axis=0)

    plt.figure(figsize=(8, 7))

    # Robustness: never ask for more channels than the tensor actually has
    # (the original indexed 25 channels unconditionally).
    num_pic = min(num_pic, feature_map.shape[2])
    row, col = get_row_col(num_pic)

    for i in range(num_pic):
        plt.subplot(row, col, i + 1)
        plt.imshow(feature_map[:, :, i])
        plt.axis('off')

    # plt.savefig('./mao_feature/feature_map2.png')  # save figure to disk
    plt.show()


def visualize_feature_map_sum(feature_batch):
    '''
    Collapse all channels of a conv layer's output into one map and show it.

    NOTE(review): relies on a module-level ``plt`` (matplotlib.pyplot) that is
    never imported in this file — add ``import matplotlib.pyplot as plt`` at
    the top before running.

    :param feature_batch: feature maps with shape (1, H, W, C)
    :return: None (shows the figure)
    '''
    # Drop the leading batch axis: (1, H, W, C) -> (H, W, C).
    feature_map = np.squeeze(feature_batch, axis=0)

    # Element-wise sum over the channel axis. Replaces the original
    # build-a-list-then-`sum(one for one in ...)` loop with one vectorized
    # call; the result is identical.
    feature_map_sum = feature_map.sum(axis=-1)

    plt.imshow(feature_map_sum)
    # plt.savefig('./mao_feature/feature_map_sum2.png')  # save figure to disk
    plt.show()

def pre_process(image):
    '''
    Warp an image to the 608x608 network input and convert it to a tensor.

    :param image: HxWxC image array (as returned by cv2.imread)
    :return: (images, meta, t1) — `images` is a (1, 3, 608, 608) torch tensor,
             `meta` carries the transform info needed to map detections back
             to the original image, `t1` is a timestamp taken after warping.
    '''
    height, width = image.shape[0:2]
    inp_height, inp_width = 608, 608
    # Affine transform centered on the image, scaled by its longer side.
    c = np.array([height / 2., width / 2.], dtype=np.float32)
    s = max(height, width) * 1.0
    trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])

    # Zero the last column of the affine matrix (the translation part),
    # leaving scaling only.
    trans_input[:, -1] = 0.

    inp_image = cv2.warpAffine(image, trans_input, (inp_width, inp_height),
                               flags=cv2.INTER_LINEAR)
    t1 = time.time()

    # (H, W, 3) -> (1, 3, H, W): add batch axis, channels first.
    images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
    images = torch.from_numpy(images)

    # BUG FIX: the original returned `meta` while its definition was commented
    # out, so every call raised NameError. Restore the dict the detector's
    # post-processing expects (output stride of 4).
    meta = {'c': c, 's': s,
            'out_height': inp_height // 4,
            'out_width': inp_width // 4}
    return images, meta, t1


if __name__ == '__main__':

    # ---- Part 1: run the rotated-box detector once on one image. ----

    # BUG FIX: `head` was used one line before it was defined; define it
    # before constructing the network.
    head = {'hm': 10, 'wh': 2, 'ang': 1, 'reg': 2}
    model = DlaNet(34, heads=head)
    device = torch.device('cuda')
    # NOTE(review): `args` is never defined in this file — it must be supplied
    # (e.g. via argparse) before this script can run.
    model.load_state_dict(torch.load(os.path.join(args.path, 'last.pth')))  # or best.pth?
    # BUG FIX: move the model to the same device as its input and switch to
    # inference mode (the original left the model on CPU in train mode).
    model.to(device)
    model.eval()

    # NOTE(review): `image_name` is also undefined; point it at a real file.
    image = cv2.imread(image_name)
    images, meta, t1 = pre_process(image)
    images = images.to(device)

    with torch.no_grad():
        output = model(images)

    # ---- Part 2: visualize VGG16 feature maps of the content image
    # (TensorFlow 1.x graph-mode code). ----
    # NOTE(review): `tf`, `VGGNet` and `plt` are never imported in this file;
    # this section cannot run as-is without those imports.

    content_val = read_img(content_img_path)
    print(content_val.shape)

    content = tf.placeholder(tf.float32, shape=[1, 792, 1024, 3])

    # Load the pretrained VGG16 weights (Python 3 needs encoding='latin1').
    data_dict = np.load(vgg16_npy_pyth, encoding='latin1').item()

    # Build the VGG graph for the content image.
    vgg_for_content = VGGNet(data_dict)
    vgg_for_content.build(content)

    content_features = [vgg_for_content.conv1_2,
                        vgg_for_content.conv2_2,
                        vgg_for_content.conv3_3,
                        vgg_for_content.conv4_3,
                        vgg_for_content.conv5_3,
                        ]

    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_op)

        content_features = sess.run([content_features],
                                    feed_dict={content: content_val})

        # Only the first conv block's output is visualized; the original also
        # unpacked conv2..conv5 but never used them (removed as dead locals).
        conv1 = content_features[0][0]

        # Per-channel subplots of the first conv block's output.
        visualize_feature_map(conv1)

        # Channel-summed view of the same features.
        visualize_feature_map_sum(conv1)