# Test image folder
#------------
# folder = "./img/8P Uneven"
folder = "./img/8P Even"
#-----------

# Model weights (experiment ids) to use
#--------------------
# Part 1: dial segmentation experiment
exp_no = '01'
# Part 2: keypoint detection experiment
# exp_no_2 = '02'# trained on the "uneven" set
# exp_no_2 = '03'# trained on the "even" set
exp_no_2 = '04'# "even" set, 20 epochs
#---------------------------------------

# Visualization switch: to enable the GUI set look=1 (comment out look=0)
# and uncomment the blocks guarded by look==1 further down.
#--------------------------
look=0
# look=1 # the two modules below are only needed when look == 1
# import PySimpleGUI as sg
# import matplotlib.pyplot as plt
# if look==1:
#     print("look")
#------------------------------------


import os
import sys
import numpy as np
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
from torchvision.models.detection import MaskRCNN
import torchvision.transforms as tT
import torchvision.transforms.functional as F
from torchvision.utils import draw_segmentation_masks
import torchvision.ops.boxes as box_ops
from PIL import Image, ExifTags
import json
import time
import numpy as np

# 导入本文件写的一些函数接口
from _pointer_meter_helpers import rotate_im_accord_exiftag, load_anno, load_valid_imfile_names,show,iou,my_NMS,remove_low_scores,fit_circle,get_center_seq,get_center_seq_blur,judge,judge2,get_reading,get_reading_zdir,pointer_to_read
sys.path.insert(0, './torchvision_det_references') #确保可以通过下面的语句导入位于子目录中的包
import transforms as T


# Overall start timestamp (consumed only by the commented-out timing prints at the bottom).
time1 = time.time()

class MeterSegDataset(torch.utils.data.Dataset):
    """Meter-dial dataset yielding (image, target) pairs in COCO-ish format.

    The target dict carries boxes, labels, masks, "zdir" tick-direction
    segments and pointer keypoints, all scaled down by ``down_scale_factor``
    (the annotation loader applies the same factor).
    """

    def __init__(self, root, transforms, down_scale_factor=8):
        self.root = root  # root folder path containing the images
        self.transforms = transforms  # joint (img, target) data transformations
        self.down_scale_factor = down_scale_factor  # image is shrunk to 1/factor of the original

        self.imgs = load_valid_imfile_names(root)

    def __len__(self):
        return len(self.imgs)

    @staticmethod
    def _segment_to_keypoints(seg):
        """Turn a flat [x1, y1, x2, y2] segment into two (x, y, visible=1) keypoints."""
        return [[seg[0], seg[1], 1], [seg[2], seg[3], 1]]

    def __getitem__(self, idx):
        fpath = os.path.join(self.root, self.imgs[idx])
        img = Image.open(fpath)
        img = rotate_im_accord_exiftag(img)  # undo phone-camera EXIF rotation
        img = img.convert('RGB')  # drop alpha: files may load as 4-channel RGBA

        # Shrink the image; PIL's .size is (width, height), not a pixel count.
        im_sz = (img.size[0] // self.down_scale_factor, img.size[1] // self.down_scale_factor)
        img = img.resize(im_sz)

        # Load the annotations, already scaled by the same factor.
        anno = load_anno(self.root, self.imgs[idx], self.down_scale_factor)

        # Bounding boxes. reshape(-1, 4) keeps an empty annotation usable as an
        # (0, 4) tensor so the area computation below can still index columns.
        boxes = torch.as_tensor(list(anno['mask_boxes']), dtype=torch.float32).reshape(-1, 4)

        # Single foreground class (the meter), so every label is 1.
        num_objs = len(boxes)
        labels = torch.ones((num_objs,), dtype=torch.int64)
        # uint8 tensors act as boolean masks downstream (1 = keep, 0 = drop),
        # unlike long tensors which would be interpreted as indices.
        masks = torch.as_tensor(anno['masks'], dtype=torch.uint8)

        # Remaining COCO-format bookkeeping fields.
        image_id = torch.tensor([idx])
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        iscrowd = torch.zeros((num_objs,), dtype=torch.int64)

        # Tick-direction ("zdir") segments, used later to orient the reading.
        zdir = torch.as_tensor(
            [self._segment_to_keypoints(seg) for seg in anno['zdir']],
            dtype=torch.float32)

        # Pointer keypoints, same (x, y, visibility) layout.
        keypoints = torch.as_tensor(
            [self._segment_to_keypoints(seg) for seg in anno['pdir']],
            dtype=torch.float32)

        target = {
            "boxes": boxes,
            "labels": labels,
            "masks": masks,
            "image_id": image_id,
            "area": area,
            "iscrowd": iscrowd,
            "zdir": zdir,
            "keypoints": keypoints,
        }

        # Apply the joint transforms last so they see the full target dict.
        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target


    # %% define the mask faster-rcnn -based model
# %% Build the Mask R-CNN based segmentation model.
# Two classes in our dataset: background and the dial.
num_classes = 2
# Instantiate the instance-segmentation architecture without downloading weights...
model = torchvision.models.detection.maskrcnn_resnet50_fpn(weights=None)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# ...then initialize it from the locally stored COCO-pretrained checkpoint.
model.load_state_dict(torch.load('./weights/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth', map_location=device))

# Swap in a fresh box-classification head sized for our class count.
box_in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(box_in_features, num_classes)

# Swap in a fresh mask-prediction head as well.
mask_in_channels = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
model.roi_heads.mask_predictor = MaskRCNNPredictor(mask_in_channels, hidden_layer, num_classes)

# Load the fine-tuned segmentation weights for the chosen experiment.
fn = f'./weights/model_weights_seg_{exp_no}.pth'
print(f"device1:{device}")
model.load_state_dict(torch.load(fn, map_location=device))
model.to(device)

model.eval()

# %% prepare for training

def get_transform(train):
    """Build the transform pipeline; training adds photometric augmentation."""
    ops = [T.PILToTensor(), T.ConvertImageDtype(torch.float)]
    if train:
        ops.append(T.RandomPhotometricDistort())
    return T.Compose(ops)
dataset = MeterSegDataset(folder, get_transform(train=True), down_scale_factor=8)
dataset_test = MeterSegDataset(folder, get_transform(train=False),down_scale_factor=8)

# dataset = MeterSegDataset(folder, get_transform(train=True), down_scale_factor=8)
# dataset_test = MeterSegDataset(folder, get_transform(train=False),down_scale_factor=8)

# Split into train / test subsets (last 30 shuffled indices become the test set).
indices = torch.randperm(len(dataset)).tolist()# randperm() shuffles the indices; NOTE(review): unseeded, so the split differs between runs
dataset = torch.utils.data.Subset(dataset, indices[:-30])# Subset selects the sub-dataset at the given index sequence
dataset_test = torch.utils.data.Subset(dataset_test, indices[-30:])




# Reminder: layers such as dropout/batch-norm must be switched to inference
# mode via model.eval() before predicting, and back via model.train() before training.
'''我们需要将这些层设置到预测模式，model.eval()就是帮我们一键搞定的,
相应的，在训练之前，我们也要记得将这些特殊的层设置到训练模式：model.train()'''



# Interactive viewer for the segmentation results (only when look == 1).
if look==1:
    layout = [  [sg.Text('共有'+str(len(dataset_test))+"个带标注的水表图像")],
        [sg.Text('请输入要查看的图像序号(从0开始):'), sg.InputText('0')],            
        [sg.Button('Ok'), sg.Button('Cancel')] ]
    # Create the Window
    window1 = sg.Window('检测结果查看程序', layout)
    plt.ion()# turn on interactive (live) plotting
    fig = plt.figure()
    # Event Loop to process "events" and get the "values" of the inputs
    while True:
        event, values = window1.read()
        if event == sg.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel
            plt.close(fig)
            break
        if(int(values[0])>=0 and int(values[0])<len(dataset_test)):
            fid = int(values[0])
            img,label = dataset_test.__getitem__(fid)
            
            x = img.unsqueeze(0)# unsqueeze(0) prepends a batch dimension
            x = x.to(device)
            predictions = model(x)  
            
            plt.cla()# clear only the active axes of the current figure; other axes are untouched
            #show the original image
            mask = predictions[0]['masks']
            # pil_img = tT.ToPILImage()(img)
            pil_img = tT.ToPILImage()(mask[0])
            plt.imshow(pil_img)

            # # Draw the ground-truth bounding boxes.
            # boxes_gt = label['boxes'].numpy()
            # nbox = boxes_gt.shape[0]
            # for i in range(nbox):
            #     box = boxes_gt[i,:]
            #     w = box[2]-box[0]
            #     h = box[3]-box[1]
            #     # gca = "get current axes"; add_patch() attaches the rectangle patch to it
            #     plt.gca().add_patch(plt.Rectangle((box[0],box[1]), w,h,fill=False, color='green'))

            # Draw the predicted boxes that survive NMS.
            boxes = predictions[0]['boxes'].cpu().detach().numpy()
            scores = predictions[0]['scores'].cpu().detach().numpy()
            nms_threshold = 0.5
            selected_idx = my_NMS(boxes, scores, nms_threshold,1)
            for i in selected_idx:
                box = boxes[i,:]
                w = box[2]-box[0]
                h = box[3]-box[1]
                plt.gca().add_patch(plt.Rectangle((box[0],box[1]), w,h,fill=False, color='red'))
            
            plt.draw()
    window1.close()

# Start of the dial-detection timing window.
time2 = time.time()
# Measure dial-detection accuracy on the test set and crop each detected dial.
num_success = 0
imgs = []  # cropped dial tensors, reused by the keypoint stage below
for i in range(len(dataset_test)):
    img, label = dataset_test[i]

    x = img.unsqueeze(0)  # prepend a batch dimension
    x = x.to(device)
    predictions = model(x)

    boxes = predictions[0]['boxes']
    scores = predictions[0]['scores']
    nms_threshold = 0.5
    # Custom NMS; the last argument keeps at most 1 box (one dial per image).
    selected_idx = my_NMS(boxes, scores, nms_threshold, 1)
    for j in selected_idx:
        selected_box = boxes[j]

        w1 = selected_box[2] - selected_box[0]
        h1 = selected_box[3] - selected_box[1]

        # Ground-truth box (a single dial is annotated per image).
        boxes_gt = label['boxes'].numpy()
        box = boxes_gt[0, :]
        w2 = box[2] - box[0]
        h2 = box[3] - box[1]

        # The detection counts as a success when its IoU with the ground
        # truth exceeds the threshold.
        threshold = 0.5
        Iou = iou(selected_box[0], selected_box[1], w1, h1, box[0], box[1], w2, h2)
        print(f"第 {i+1} 张, 当前的iou值为: {Iou}")
        if Iou > threshold:
            num_success += 1

        # Crop the dial region straight from the tensor image
        # (tensor[:, y1:y2, x1:x2]); tensor indices must be integers, unlike
        # PIL's crop which accepts fractional coordinates.
        meter_img = img[:, int(selected_box[1]):int(selected_box[3]), int(selected_box[0]):int(selected_box[2])]
        imgs.append(meter_img)
# Fix: the original reassigned time2 here, clobbering the start timestamp and
# making the (commented-out) "dial detection time" report ~0; time3 below
# already marks the end of this stage.
print(f"表盘在测试集准确率：{num_success/len(dataset_test)*100}%")
print(f"len(imgs):{len(imgs)}")

time3 = time.time()

#--------------------------------------------------------------
# Part 2: detect the pointer digits via bounding boxes and keypoints.

from torchvision.models.detection.keypoint_rcnn import KeypointRCNNPredictor

# %% Build the Keypoint R-CNN based detection model.
#-----------------------------------------------------------------------------
# Two classes: background and the digit dial.
num_classes = 2
# Each digit dial carries two keypoints.
num_keypoints = 2
# Instantiate the keypoint-detection architecture without downloading weights...
model = torchvision.models.detection.keypointrcnn_resnet50_fpn(weights=None)
# ...then initialize it from the locally stored COCO-pretrained checkpoint.
model.load_state_dict(torch.load('./weights/keypointrcnn_resnet50_fpn_coco-fc266e95.pth', map_location=device))
# Replace both heads so they match our class / keypoint counts.
box_head_in = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(box_head_in, num_classes)
kp_head_in = model.roi_heads.keypoint_predictor.kps_score_lowres.in_channels
model.roi_heads.keypoint_predictor = KeypointRCNNPredictor(kp_head_in, num_keypoints)



# Load the fine-tuned keypoint-detection weights for the chosen experiment.
exp_name = 'pt-dir-detection'
fn2 = f'./weights/model_weights_{exp_name}_{exp_no_2}.pth'

print(f"device2:{device}")
model.load_state_dict(torch.load(fn2, map_location=device))
model.to(device)
model.eval()


#------------------------------------------------------------------------------------------------
# Reading stage: interactive viewer for the keypoint predictions (only when look == 1).
if look == 1:
    layout = [  [sg.Text('共有'+str(len(dataset_test))+"个带标注的水表图像")],
        [sg.Text('请输入要查看的图像序号(从0开始):'), sg.InputText('0')],            
        [sg.Button('Ok'), sg.Button('Cancel')] ]


    # Create the Window
    window2 = sg.Window('检测结果查看程序', layout)
    plt.ion()
    fig = plt.figure()
    # Event Loop to process "events" and get the "values" of the inputs
    while True:
        event, values = window2.read()
        if event == sg.WIN_CLOSED or event == 'Cancel':  # user closed the window or clicked cancel
            plt.close(fig)
            break
        if(int(values[0])>=0 and int(values[0])<len(dataset_test)):
            fid = int(values[0])
            img, label = dataset_test.__getitem__(fid)

            # Ground-truth reading from the annotations.
            zdir = label["zdir"].numpy()
            keypoints = label['keypoints'].numpy()
            total = get_center_seq(keypoints)
            # total = get_center_seq_blur(keypoints)
            # Reorder to x1,y1,x2,y2 (coordinates of tick 5, then tick 1).
            zdir = [zdir[0,1,0], zdir[0,1,1], zdir[0,0,0], zdir[0,0,1],]
            ground_value = pointer_to_read(total, 8, zdir)
            print("ground_value:", ground_value)
            # Fold the 8 digits into a single number.
            true_read = 0
            for j in range(8):
                true_read = true_read*10 + ground_value[j]
            print("true_read:", true_read)

            # Run the keypoint model on the dial crop from the segmentation stage.
            img = imgs[fid]
            x = img.unsqueeze(0)
            x = x.to(device)
            predictions = model(x)

            # Predicted reading.
            boxes = predictions[0]['boxes']
            scores = predictions[0]['scores']
            # Drop detections whose confidence is below score_threshold.
            score_threshold = 0.9
            boxes, scores = remove_low_scores(boxes, scores, score_threshold)
            # Boxes overlapping more than nms_threshold are suppressed.
            nms_threshold = 0.1
            selected_idx = my_NMS(boxes, scores, nms_threshold, 8)  # custom NMS; keep the 8 digit boxes
            # selected_idx = box_ops.nms(boxes, scores, nms_threshold)
            keypoints = predictions[0]['keypoints'].cpu().detach().numpy()
            total = get_center_seq(keypoints, selected_idx)
            # total = get_center_seq_blur(keypoints, selected_idx)
            predict_value = pointer_to_read(total, 8, zdir)  # read along the annotated horizontal direction
            # predict_value = pointer_to_read(total, 8)  # read along the image-horizontal direction
            print("predict_value:", predict_value)
            predict_read = 0
            for j in range(8):
                predict_read = predict_read*10 + predict_value[j]
            print("predict_read:", predict_read)


            # Re-fetch numpy copies and apply the same filtering for display.
            keypoints = predictions[0]['keypoints'].cpu().detach().numpy()
            boxes = predictions[0]['boxes'].cpu().detach().numpy()
            scores = predictions[0]['scores'].cpu().detach().numpy()
            plt.cla()
            # show the image
            pil_img = tT.ToPILImage()(img)
            plt.imshow(pil_img)

            score_threshold = 0.9
            boxes, scores = remove_low_scores(boxes, scores, score_threshold)
            print(len(scores))
            nms_threshold = 0.1
            selected_idx = my_NMS(boxes, scores, nms_threshold, 8)  # custom NMS; keep the 8 digit boxes
            # selected_idx = box_ops.nms(boxes, scores, nms_threshold)
            print(selected_idx)

            # Draw the surviving keypoints (red = first point, green = second)
            # and their bounding boxes.
            for i in selected_idx:
                plt.plot(keypoints[i,0,0], keypoints[i,0,1], 'r*')
                plt.plot(keypoints[i,1,0], keypoints[i,1,1], 'g*')
                box = boxes[i,:]
                w = box[2]-box[0]
                h = box[3]-box[1]
                plt.gca().add_patch(plt.Rectangle((box[0],box[1]), w,h,fill=False, color='red'))
            # Pop up a small window with the true vs. predicted reading.
            # (Removed the unused placeholder locals true=111 / pre=222.)
            layout2 = [ [sg.Text('真实读数：{:.2f}'.format(true_read))], [sg.Text('预测读数：{:.2f}'.format(predict_read))], [sg.Button('Ok')] ]
            window = sg.Window('读数信息', layout2,location=(1000, 400))  # location=(x, y): horizontal then vertical offset
            while True:
                event, values = window.read()
                if event == sg.WIN_CLOSED or event == 'Ok':
                    window.close()
                    break
            plt.draw()

    window2.close()

time4 = time.time()
# Score the pointer/keypoint stage on the whole test set.
num_success_pointer = 0
for i in range(len(dataset_test)):
    print("--------------------------")
    print(f"第 {i+1} 张")
    # Ground-truth label for image i...
    img, label = dataset_test[i]
    # ...but the model runs on the dial crop produced by the segmentation stage.
    img = imgs[i]

    # Ground-truth reading.
    zdir = label["zdir"].numpy()
    keypoints = label['keypoints'].numpy()
    total = get_center_seq(keypoints)
    # total = get_center_seq_blur(keypoints)
    # Reorder to x1,y1,x2,y2 (coordinates of tick 5, then tick 1).
    zdir = [zdir[0,1,0], zdir[0,1,1], zdir[0,0,0], zdir[0,0,1],]
    ground_value = pointer_to_read(total, 8, zdir)
    print("ground_value:", ground_value)
    # Fold the 8 digits into a single number.
    true_read = 0
    for j in range(8):
        true_read = true_read*10 + ground_value[j]
    print("true_read:", true_read)

    x = img.unsqueeze(0)
    x = x.to(device)
    predictions = model(x)

    # Predicted reading.
    boxes = predictions[0]['boxes']
    scores = predictions[0]['scores']

    # Drop low-confidence detections, then NMS down to the 8 digit boxes.
    score_threshold = 0.9
    boxes, scores = remove_low_scores(boxes, scores, score_threshold)
    nms_threshold = 0.1
    selected_idx = my_NMS(boxes, scores, nms_threshold, 8)  # custom NMS; keep the 8 digit boxes
    # selected_idx = box_ops.nms(boxes, scores, nms_threshold)
    keypoints = predictions[0]['keypoints'].cpu().detach().numpy()
    total = get_center_seq(keypoints, selected_idx)
    # total = get_center_seq_blur(keypoints, selected_idx)
    predict_value = pointer_to_read(total, 8, zdir)  # read along the annotated horizontal direction
    # predict_value = pointer_to_read(total, 8)  # read along the image-horizontal direction
    print("predict_value:", predict_value)
    predict_read = 0
    for j in range(8):
        predict_read = predict_read*10 + predict_value[j]
    print("predict_read:", predict_read)

    # Count per-digit matches; use a distinct index name so the outer image
    # index `i` is not shadowed (the original reused `i` here).
    for k in range(8):
        if ground_value[k] == predict_value[k]:
            num_success_pointer += 1
time5 = time.time()
print(f"表盘在测试集准确率：{num_success/len(dataset_test)*100}%")
print(f"指针在测试集准确率：{num_success_pointer/(8*len(dataset_test))*100}%")
# print(f"表盘检测用时：{time3-time2} S")
# print(f"指针检测以及读数用时：{time5-time4} S")
# print(f"总用时：{time5-time1} S")