import numpy as np
import torch
import multiprocessing
from multiprocessing import Pool
import os
import sys
import numpy as np
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
from torchvision.models.detection import MaskRCNN
import torchvision.transforms as tT
import torchvision.transforms.functional as F
from torchvision.utils import draw_segmentation_masks
import torchvision.ops.boxes as box_ops
from PIL import Image, ExifTags
import json
import time
import numpy as np
# Folder of test images
#------------
# folder = "./img/8P Uneven"
folder = "./img/8P Even"
#-----------

# Model / experiment selection
#--------------------
# Part 1: the segmentation model
exp_no = '01'
# Part 2: the keypoint-detection model
# exp_no_2 = '02'  # trained on uneven dials
# exp_no_2 = '03'  # trained on even dials
exp_no_2 = '04'  # even dials, 20 epochs
#---------------------------------------

# Visualization switch: to enable it, set look=1 (comment out look=0) and
# uncomment the related code further down.
#--------------------------
look=0
# Helper functions written alongside this file
from _pointer_meter_helpers import rotate_im_accord_exiftag, load_anno, load_valid_imfile_names,show,iou,my_NMS,remove_low_scores,fit_circle,get_center_seq,get_center_seq_blur,judge,judge2,get_reading,get_reading_zdir,pointer_to_read
sys.path.insert(0, './torchvision_det_references') # so the detection-reference package in that subdirectory can be imported below
print(os.getcwd())
import transforms as T
class MeterSegDataset(torch.utils.data.Dataset):
    """Pointer-meter dataset: images plus COCO-style segmentation/keypoint targets.

    Each sample is ``(image, target)`` where target carries boxes, labels,
    masks, image_id, area, iscrowd, plus the project-specific ``zdir``
    (zero-direction segment) and ``keypoints`` (pointer-direction) entries.
    """

    def __init__(self, root, transforms, down_scale_factor=8):
        """
        root: folder holding the images and their annotation files.
        transforms: callable applied as ``transforms(img, target)``, or None.
        down_scale_factor: images are resized to 1/down_scale_factor of
            their original width/height.
        """
        self.root = root
        self.transforms = transforms
        self.down_scale_factor = down_scale_factor
        self.imgs = load_valid_imfile_names(root)

    def __len__(self):
        return len(self.imgs)

    @staticmethod
    def _endpoint_pairs(segments):
        """Turn ``[x1, y1, x2, y2]`` rows into ``[[x1, y1, 1], [x2, y2, 1]]``.

        The trailing 1 is the keypoint visibility flag used by torchvision's
        keypoint targets.
        """
        pairs = []
        for seg in segments:
            p1 = list(seg[:2]) + [1]
            p2 = list(seg[2:]) + [1]
            pairs.append([p1, p2])
        return pairs

    def __getitem__(self, idx):
        """Load, orient and resize image ``idx`` and build its target dict."""
        fpath = os.path.join(self.root, self.imgs[idx])
        img = Image.open(fpath)
        img = rotate_im_accord_exiftag(img)  # undo phone-camera EXIF rotation
        img = img.convert('RGB')  # drop the alpha channel of RGBA files
        # PIL's .size is (width, height), not a pixel count.
        im_sz = (img.size[0] // self.down_scale_factor,
                 img.size[1] // self.down_scale_factor)
        img = img.resize(im_sz)

        # Annotations are loaded already rescaled by the same factor.
        anno = load_anno(self.root, self.imgs[idx], self.down_scale_factor)

        boxes = torch.as_tensor(list(anno['mask_boxes']), dtype=torch.float32)
        num_objs = len(boxes)
        # Single foreground class (the meter), so every object gets label 1.
        labels = torch.ones((num_objs,), dtype=torch.int64)
        # uint8 masks: 1 = object pixel, 0 = background.
        masks = torch.as_tensor(anno['masks'], dtype=torch.uint8)

        # Remaining COCO-format fields.
        image_id = torch.tensor([idx])
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        iscrowd = torch.zeros((num_objs,), dtype=torch.int64)

        # Zero-direction segments and pointer-direction keypoints share the
        # same [[x, y, visible], ...] layout.
        zdir = torch.as_tensor(self._endpoint_pairs(anno['zdir']),
                               dtype=torch.float32)
        keypoints = torch.as_tensor(self._endpoint_pairs(anno['pdir']),
                                    dtype=torch.float32)

        target = {
            "boxes": boxes,
            "labels": labels,
            "masks": masks,
            "image_id": image_id,
            "area": area,
            "iscrowd": iscrowd,
            "zdir": zdir,
            "keypoints": keypoints,
        }

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target


        # %% define the mask faster-rcnn -based model
#我们数据集共2个类别，背景和指针
num_classes = 2 
# 加载在COCO上预先训练的实例分割模型(实例分割模型）
model = torchvision.models.detection.maskrcnn_resnet50_fpn(weights=None)
# device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
device = torch.device('cpu')
model.load_state_dict(torch.load('./weights/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth', map_location=device))

# 获取分类器的输入特征数
in_features = model.roi_heads.box_predictor.cls_score.in_features
# 用新的头替换预先训练好的头
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)

# 现在获取掩码分类器的输入特征数量
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
# 并用新的掩码预测器替换掩码预测器
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,hidden_layer,num_classes)
# 假设iou是已经定义好的函数
# 假设my_NMS是已经定义好的函数
# 假设model是已经加载并设置为评估模式的模型
# 假设dataset_test是你的数据集
def get_transform(train):
    """Compose the torchvision reference transforms for this dataset.

    train: when True, append random photometric distortion as augmentation.
    Returns a T.Compose pipeline converting a PIL image to a float tensor.
    """
    steps = [T.PILToTensor(), T.ConvertImageDtype(torch.float)]
    if train:
        steps.append(T.RandomPhotometricDistort())
    return T.Compose(steps)
dataset = MeterSegDataset(folder, get_transform(train=True), down_scale_factor=4)
dataset_test = MeterSegDataset(folder, get_transform(train=False),down_scale_factor=4)

# dataset = MeterSegDataset(folder, get_transform(train=True), down_scale_factor=8)
# dataset_test = MeterSegDataset(folder, get_transform(train=False),down_scale_factor=8)

# Split the data into train and test subsets.
indices = torch.randperm(len(dataset)).tolist()  # random permutation of sample indices
dataset = torch.utils.data.Subset(dataset, indices[:-30])  # Subset keeps only the samples at the given indices
# NOTE(review): training drops the last 30 shuffled indices but testing uses
# only the single last one — confirm whether indices[-30:] was intended here.
dataset_test = torch.utils.data.Subset(dataset_test, indices[-1:])

def process_batch(batch):
    """Detect the meter region in every sample of *batch*.

    Runs the module-level segmentation ``model`` on each image, applies NMS
    to the predicted boxes, scores each surviving box against the first
    ground-truth box via IoU, and crops the detected meter region.

    Parameters:
        batch: iterable of (img, label) pairs, where img is a CHW float
            tensor and label is a target dict with a 'boxes' tensor.
    Returns:
        (num_success, imgs): count of detections with IoU > 0.5 against the
        ground truth, and the list of cropped meter image tensors.
    """
    # Each worker process loads the fine-tuned weights into its own copy of
    # the module-level model before running inference.
    fn = './weights/model_weights_seg_'+exp_no+'.pth'
    model.load_state_dict(torch.load(fn, map_location=torch.device(device=device)))
    model.to(device)
    model.eval()

    num_success, imgs = 0, []
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        for img, label in batch:
            x = img.unsqueeze(0)  # add the batch dimension
            predictions = model(x)

            boxes = predictions[0]['boxes']
            scores = predictions[0]['scores']
            nms_threshold = 0.5
            selected_idx = my_NMS(boxes, scores, nms_threshold, 1)

            # First ground-truth box; invariant across the detections loop.
            gt = label['boxes'].numpy()[0, :]
            gt_w = gt[2] - gt[0]
            gt_h = gt[3] - gt[1]

            for j in selected_idx:
                det = boxes[j]
                det_w = det[2] - det[0]
                det_h = det[3] - det[1]
                if iou(det[0], det[1], det_w, det_h, gt[0], gt[1], gt_w, gt_h) > 0.5:
                    num_success += 1
                # Crop the detected region (CHW layout: rows then columns).
                imgs.append(img[:, int(det[1]):int(det[3]), int(det[0]):int(det[2])])
    return num_success, imgs

def prepare_batches(dataset, batch_size):
    """Split *dataset* (a sequence) into consecutive slices of at most *batch_size* items."""
    batches = []
    start = 0
    while start < len(dataset):
        batches.append(dataset[start:start + batch_size])
        start += batch_size
    return batches

if __name__ == '__main__':
    # Materialize the test set once: each __getitem__ loads and resizes an
    # image from disk, so index every sample exactly one time (the original
    # indexed each sample twice).
    dataset_list = [dataset_test[i] for i in range(len(dataset_test))]
    batches = prepare_batches(dataset_list, 4)  # 4 samples per batch

    # Fan the batches out across one worker process per CPU core.
    with Pool(multiprocessing.cpu_count()) as p:
        results = p.map(process_batch, batches)

    # Aggregate the per-batch (num_success, imgs) results.
    total_success = sum(result[0] for result in results)
    all_imgs = [img for result in results for img in result[1]]

    print(f'Total success: {total_success}')
    # all_imgs now holds every cropped meter region
