import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout, QLabel, QFileDialog
from PyQt5.QtGui import QImage, QPixmap
from PIL import Image, ImageDraw,ImageFont 
import os
import numpy as np
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
from torchvision.models.detection import MaskRCNN
import torchvision.transforms as tT
import torchvision.transforms.functional as F
from torchvision.utils import draw_segmentation_masks
import torchvision.ops.boxes as box_ops
from PIL import Image, ExifTags
import json
import time
import numpy as np
from torchvision.models.detection.keypoint_rcnn import KeypointRCNNPredictor
# Import the helper-function interfaces written alongside this file
from _pointer_meter_helpers import rotate_im_accord_exiftag, load_anno, load_valid_imfile_names,show,iou,my_NMS,remove_low_scores,fit_circle,get_center_seq,get_center_seq_blur,judge,judge2,get_reading,get_reading_zdir,pointer_to_read

class MyApp(QWidget):
    """Pointer-meter reading GUI.

    Stage 1: a fine-tuned Mask R-CNN locates the meter's pointer region in the
    photo and the region is cropped out.  Stage 2: a fine-tuned Keypoint R-CNN
    finds the dial boxes (2 keypoints each — pointer tail and tip) in the crop,
    and the `_pointer_meter_helpers` functions convert the pointer directions
    into one digit per dial, which are concatenated into the final reading.
    """

    # Number of dials expected on the meter face; stage-2 NMS keeps this many
    # boxes and the reading is assembled from this many digits.
    NUM_DIALS = 8

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Build the window: image display area, prediction label, file button."""
        self.setWindowTitle('指针式仪表读数')
        self.setGeometry(100, 100, 800, 600)

        layout = QVBoxLayout()

        self.imageLabel = QLabel('图像展示区')
        layout.addWidget(self.imageLabel)

        self.predictionLabel = QLabel('预测信息展示区')
        layout.addWidget(self.predictionLabel)

        btnLoadImage = QPushButton('选择图像', self)
        btnLoadImage.clicked.connect(self.loadImage)
        layout.addWidget(btnLoadImage)

        self.setLayout(layout)

    def loadImage(self):
        """Ask the user for an image file; if one is chosen, run the prediction."""
        filePath, _ = QFileDialog.getOpenFileName(
            self, '选择图像', '', 'Image files (*.jpg *.gif *.png)')
        if filePath:
            self.performPrediction(filePath)

    def _build_seg_model(self, device):
        """Load the stage-1 Mask R-CNN (2 classes: background + pointer region).

        The COCO-pretrained backbone weights are loaded first, the box and mask
        heads are replaced with 2-class heads, and then the fine-tuned weights
        are loaded on top.  Returns the model in eval mode on *device*.
        """
        num_classes = 2  # background + pointer region
        model = torchvision.models.detection.maskrcnn_resnet50_fpn(weights=None)
        model.load_state_dict(torch.load(
            '../weights/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth',
            map_location=device))

        # Replace the pretrained heads so the checkpoint below fits.
        in_features = model.roi_heads.box_predictor.cls_score.in_features
        model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
        in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
        hidden_layer = 256
        model.roi_heads.mask_predictor = MaskRCNNPredictor(
            in_features_mask, hidden_layer, num_classes)

        exp_no = '01'
        fn = '../weights/model_weights_seg_' + exp_no + '.pth'
        model.load_state_dict(torch.load(fn, map_location=device))
        model.to(device)
        model.eval()
        return model

    def _build_kpt_model(self, device):
        """Load the stage-2 Keypoint R-CNN (2 classes, 2 keypoints per dial).

        Same pattern as `_build_seg_model`: COCO weights, replaced heads, then
        the fine-tuned checkpoint.  Returns the model in eval mode on *device*.
        """
        num_classes = 2     # background + dial
        num_keypoints = 2   # pointer tail and tip
        model = torchvision.models.detection.keypointrcnn_resnet50_fpn(weights=None)
        model.load_state_dict(torch.load(
            '../weights/keypointrcnn_resnet50_fpn_coco-fc266e95.pth',
            map_location=device))

        in_features = model.roi_heads.box_predictor.cls_score.in_features
        model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
        in_features2 = model.roi_heads.keypoint_predictor.kps_score_lowres.in_channels
        model.roi_heads.keypoint_predictor = KeypointRCNNPredictor(
            in_features2, num_keypoints)

        exp_name = 'pt-dir-detection'
        exp_no_2 = '04'
        fn2 = '../weights/model_weights_' + exp_name + '_' + exp_no_2 + '.pth'
        model.load_state_dict(torch.load(fn2, map_location=device))
        model.to(device)
        model.eval()
        return model

    def performPrediction(self, filePath):
        """Run both detection stages on *filePath* and show crop + reading."""
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # ToTensor converts PIL.Image -> float tensor normalized to [0, 1].
        to_tensor = tT.ToTensor()
        im_sz = (500, 500)

        # ---------------- Stage 1: locate and crop the pointer region --------
        img = Image.open(filePath)
        img = rotate_im_accord_exiftag(img)  # undo phone-camera EXIF rotation
        # .convert('RGB') drops a possible alpha channel (RGBA sources).
        img = img.convert('RGB').resize(im_sz)

        model = self._build_seg_model(device)
        x = to_tensor(img).unsqueeze(0).to(device)  # add batch dimension
        with torch.no_grad():  # inference only — don't build autograd graphs
            predictions = model(x)

        boxes = predictions[0]['boxes'].cpu().numpy()
        scores = predictions[0]['scores'].cpu().numpy()
        nms_threshold = 0.5
        selected_idx = my_NMS(boxes, scores, nms_threshold, 1)  # best single box
        x1, y1, x2, y2 = (int(v) for v in boxes[selected_idx[0]])
        img = img.crop((x1, y1, x2, y2))
        img.save("debug_image.png")  # kept on disk for offline debugging

        # ---------------- Stage 2: detect the dials and their keypoints ------
        img = img.convert('RGB').resize(im_sz)

        model = self._build_kpt_model(device)
        x = to_tensor(img).unsqueeze(0).to(device)
        with torch.no_grad():
            predictions = model(x)

        boxes_t = predictions[0]['boxes']
        scores_t = predictions[0]['scores']
        # Drop low-confidence detections, then NMS down to NUM_DIALS boxes.
        score_threshold = 0.9
        boxes_t, scores_t = remove_low_scores(boxes_t, scores_t, score_threshold)
        nms_threshold = 0.1
        selected_idx = my_NMS(boxes_t, scores_t, nms_threshold, self.NUM_DIALS)
        # NOTE(review): selected_idx indexes the score-filtered arrays but is
        # applied below to the unfiltered model outputs.  This lines up only if
        # remove_low_scores keeps a prefix of the (score-sorted) detections —
        # confirm that helper preserves order.
        keypoints = predictions[0]['keypoints'].cpu().numpy()
        boxes = predictions[0]['boxes'].cpu().numpy()

        # Convert pointer directions into one digit per dial, then concatenate.
        total = get_center_seq(keypoints, selected_idx)
        predict_value = pointer_to_read(total, self.NUM_DIALS)
        predict_read = 0
        for j in range(self.NUM_DIALS):
            predict_read = predict_read * 10 + predict_value[j]

        # ---------------- Draw the result and push it into the Qt widgets ----
        draw = ImageDraw.Draw(img)
        # Draw the reading once (it was previously redrawn per box).
        draw.text((10, 10), f"predict_read:{predict_read}",
                  font=ImageFont.load_default(), fill="red")
        for i in selected_idx:
            bx = boxes[i]
            draw.rectangle([(bx[0], bx[1]), (bx[2], bx[3])], outline="red")
            # BUG FIX: the pointer line must be given as two (x, y) pairs; the
            # old code passed ((x0, y0), x1, y1) — a pair plus two bare scalars.
            draw.line([(keypoints[i, 0, 0], keypoints[i, 0, 1]),
                       (keypoints[i, 1, 0], keypoints[i, 1, 1])], fill="blue")

        img.save("debug_image.png")

        data = img.tobytes("raw", "RGB")
        w, h = img.size
        # Pass the stride (3 bytes/pixel) explicitly: QImage otherwise assumes
        # 32-bit scanline alignment, which corrupts images of arbitrary width.
        qImg = QImage(data, w, h, 3 * w, QImage.Format_RGB888)

        # Update the image display area and the prediction text.
        self.imageLabel.setPixmap(QPixmap.fromImage(qImg))
        self.predictionLabel.setText(f"该表盘的读数为：{predict_read}")

def main():
    """Create the Qt application, show the main window, and run the event loop."""
    app = QApplication(sys.argv)
    window = MyApp()
    window.show()
    sys.exit(app.exec_())

# Start the GUI only when run as a script (not when imported as a module).
if __name__ == '__main__':
    main()
