#%% app configuration
#--------------------------------------------------------
exp_name = 'pt-dir-detection'
# exp_no = '2024-1-18-1'
exp_no = '02'  # experiment number; part of the saved-weights file name

# mode = 'train'
mode = 'test'  # 'train': train and save weights; 'test': load weights and open the result viewer

# folder = "C:\\Users\\86198\\Desktop\\8针水表\\8针水表\\8P Uneven"
# folder = "./img/8P Uneven"
folder = "./img/8P Even"  # dataset folder containing images and their annotations

num_epochs = 10
train_batch_size = 2
test_batch_size = 2

# Experiment notes:
# Reference: How to Train a Custom Keypoint Detection Model with PyTorch https://medium.com/@alexppppp/how-to-train-a-custom-keypoint-detection-model-with-pytorch-d9af90e111da
# NOTE: following the reference, coco_eval.py in the subdirectory was modified by adding the line: coco_eval.params.kpt_oks_sigmas = np.array([.5, .5]) / 10.0

#%% import libs
#--------------------------------------------------------
import os
from ssl import SSLSyscallError
import sys
import numpy as np
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
from torchvision.models.detection.keypoint_rcnn import KeypointRCNNPredictor
from torchvision.models.detection import MaskRCNN
import torchvision.transforms as tT
import torchvision.transforms.functional as F
from torchvision.utils import draw_segmentation_masks
import matplotlib.pyplot as plt

from PIL import Image, ExifTags
import json
import PySimpleGUI as sg
from _pointer_meter_helpers import rotate_im_accord_exiftag, load_anno, load_valid_imfile_names

sys.path.insert(0, './torchvision_det_references') #确保可以通过下面的语句导入位于子目录中的包
import utils
from engine import train_one_epoch, evaluate
import transforms as T

# %% dataset class
#-------------------------------------------------------------------
class MeterSegDataset(torch.utils.data.Dataset):
    """Water-meter images with bounding-box and pointer-direction (keypoint)
    annotations, formatted for torchvision's Keypoint R-CNN references.
    """

    def __init__(self, root, transforms, down_scale_factor=8):
        """
        Args:
            root: folder containing the images (and annotation files read
                by load_anno).
            transforms: callable applied as transforms(img, target), or None.
            down_scale_factor: integer factor by which both the image and all
                annotation coordinates are shrunk before being returned.
        """
        self.root = root
        self.transforms = transforms
        self.down_scale_factor = down_scale_factor

        self.imgs = load_valid_imfile_names(root)

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, idx):
        fpath = os.path.join(self.root, self.imgs[idx])
        img = Image.open(fpath)
        img = rotate_im_accord_exiftag(img)
        img = img.convert('RGB')
        # Shrink the image by the configured factor.
        f = self.down_scale_factor
        img = img.resize((img.size[0] // f, img.size[1] // f))

        # Load the annotations for this image.
        anno = load_anno(self.root, self.imgs[idx], 1)

        # Bounding boxes: rescale and convert [x, y, w, h] -> [x0, y0, x1, y1].
        # Build new lists rather than mutating anno['bbox'] in place (the
        # previous code rescaled the annotation's own lists, which would
        # corrupt them if load_anno ever returned cached/shared data).
        boxes = []
        for b in anno['bbox']:
            x, y, w, h = (v / f for v in b[:4])
            boxes.append([x, y, x + w, y + h])
        boxes = torch.as_tensor(boxes, dtype=torch.float32)

        # Keypoints: each 'pdir' entry holds (x1, y1, x2, y2) — the two
        # endpoints of the pointer direction. Emit [x, y, visibility] triples
        # with visibility fixed at 1 (visible).
        keypoints = []
        for pdir in anno['pdir']:
            pt1 = [pdir[0] / f, pdir[1] / f, 1]
            pt2 = [pdir[2] / f, pdir[3] / f, 1]
            keypoints.append([pt1, pt2])
        keypoints = torch.as_tensor(keypoints, dtype=torch.float32)

        # Single foreground class: every object is a meter (label 1).
        num_objs = len(boxes)
        labels = torch.ones((num_objs,), dtype=torch.int64)

        # Remaining COCO-format fields required by the training references.
        image_id = torch.tensor([idx])
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        iscrowd = torch.zeros((num_objs,), dtype=torch.int64)

        target = {
            "boxes": boxes,
            "keypoints": keypoints,
            "labels": labels,
            "image_id": image_id,
            "area": area,
            "iscrowd": iscrowd,
        }

        # Apply the (joint image + target) transform, if any.
        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target

# %% build the Keypoint R-CNN based detection model
#-----------------------------------------------------------------------------
num_classes = 2     # background + meter
num_keypoints = 2   # the two endpoints of the pointer direction

# Start from a Keypoint R-CNN pre-trained on COCO.
model = torchvision.models.detection.keypointrcnn_resnet50_fpn(weights="DEFAULT")

# Replace the box classification head with a 2-class one.
box_in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(box_in_features, num_classes)

# Replace the keypoint head so it predicts exactly num_keypoints points.
kp_in_channels = model.roi_heads.keypoint_predictor.kps_score_lowres.in_channels
model.roi_heads.keypoint_predictor = KeypointRCNNPredictor(kp_in_channels, num_keypoints)

# %% prepare for training
#------------------------------------------------------------------------
def get_transform(train):
    """Build the joint (image, target) transform pipeline.

    Always converts PIL -> float tensor; adds photometric jitter
    augmentation when train is True.
    """
    ops = [T.PILToTensor(), T.ConvertImageDtype(torch.float)]
    if train:
        ops.append(T.RandomPhotometricDistort())
    return T.Compose(ops)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Shrink images (and annotations) by this factor.
down_scale = 12
dataset = MeterSegDataset(folder, get_transform(train=True), down_scale_factor=down_scale)
dataset_test = MeterSegDataset(folder, get_transform(train=False), down_scale_factor=down_scale)

# Random split: the last 30 shuffled indices form the test set.
indices = torch.randperm(len(dataset)).tolist()
dataset = torch.utils.data.Subset(dataset, indices[:-30])
dataset_test = torch.utils.data.Subset(dataset_test, indices[-30:])

# Training and validation data loaders (collate_fn from the references
# keeps variable-sized targets as lists instead of stacking them).
data_loader = torch.utils.data.DataLoader(
    dataset, batch_size=train_batch_size, shuffle=True, num_workers=0,
    collate_fn=utils.collate_fn)
data_loader_test = torch.utils.data.DataLoader(
    dataset_test, batch_size=test_batch_size, shuffle=False, num_workers=0,
    collate_fn=utils.collate_fn)

# SGD over the trainable parameters, with a step LR schedule.
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)
# %% do training
#-------------------------------------------------------------------
fn = './weights/model_weights_' + exp_name + '_' + exp_no + '.pth'

if mode == "train":
    model.to(device)

    print_freq = 10
    for epoch in range(num_epochs):
        # One pass over the training set, logging every print_freq iterations.
        train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq)
        # Step the LR schedule, then evaluate on the held-out split.
        lr_scheduler.step()
        evaluate(model, data_loader_test, device=device)

    print("Done!")

    # Persist the trained weights.
    torch.save(model.state_dict(), fn)

# %% test the model
#----------------------------------------------------------------------
if mode == 'test':
    # Determine the device BEFORE loading so map_location can target it.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # map_location lets a checkpoint saved on GPU be loaded on a CPU-only
    # machine; without it torch.load fails when CUDA is unavailable.
    model.load_state_dict(torch.load(fn, map_location=device))
    model.to(device)

# Inference mode for the viewer below (also harmless right after training).
model.eval()

plt.rcParams["savefig.bbox"] = 'tight'

def show(imgs):
    """Display a tensor image (or a list of them) side by side, without axes."""
    if not isinstance(imgs, list):
        imgs = [imgs]
    fig, axs = plt.subplots(ncols=len(imgs), squeeze=False)
    for col, image in enumerate(imgs):
        pil = F.to_pil_image(image.detach())
        ax = axs[0, col]
        ax.imshow(np.asarray(pil))
        ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])

# Simple viewer GUI: ask for an image index, run inference, plot the result.
layout = [  [sg.Text('共有'+str(len(dataset_test))+"个带标注的水表图像")],
            [sg.Text('请输入要查看的图像序号(从0开始):'), sg.InputText('0')],            
            [sg.Button('Ok'), sg.Button('Cancel')] ]

# Create the window.
window = sg.Window('检测结果查看程序', layout)
plt.ion()
fig = plt.figure()

# Event loop: process "events" and read the "values" of the inputs.
while True:
    event, values = window.read()
    if event == sg.WIN_CLOSED or event == 'Cancel':  # user closed window or clicked cancel
        break

    # Robust parse: skip non-numeric or out-of-range input instead of crashing.
    try:
        fid = int(values[0])
    except (TypeError, ValueError):
        continue
    if not (0 <= fid < len(dataset_test)):
        continue

    img, label = dataset_test[fid]

    # The dataset transform already produces a float tensor (PILToTensor +
    # ConvertImageDtype); applying ToTensor again would raise a TypeError.
    # Only convert when a PIL image / ndarray actually comes through.
    if not isinstance(img, torch.Tensor):
        img = tT.ToTensor()(img)

    x = img.unsqueeze(0).to(device)
    # Inference only — no_grad avoids building the autograd graph.
    with torch.no_grad():
        predictions = model(x)

    plt.cla()
    # Show the original image.
    plt.imshow(tT.ToPILImage()(img))

    # Plot predicted keypoints: first point red, second point green.
    keypoints = predictions[0]['keypoints'].cpu().numpy()
    for kp in keypoints:
        plt.plot(kp[0, 0], kp[0, 1], 'r*')
        plt.plot(kp[1, 0], kp[1, 1], 'g*')

    # Plot predicted bounding boxes in red.
    boxes = predictions[0]['boxes'].cpu().numpy()
    for box in boxes:
        w = box[2] - box[0]
        h = box[3] - box[1]
        plt.gca().add_patch(plt.Rectangle((box[0], box[1]), w, h, fill=False, color='red'))

    plt.draw()

window.close()

# %%