from transformers import DetrImageProcessor, DetrForObjectDetection, AutoModelForObjectDetection
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, Trainer, TrainingArguments
from transformers.models.detr.modeling_detr import DetrConvEncoder  # 用於定位源碼
from transformers import DetrConfig
import torch
from PIL import Image
import dataset_load
import requests
import os

# References:
# https://blog.csdn.net/weixin_44826203/article/details/137605207
# https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DETR/Fine_tuning_DetrForObjectDetection_on_custom_dataset_(balloon).ipynb

# Sanity check: confirm the local model snapshot exists before loading.
print(os.path.exists("/data/models/detr-resnet-50"))

image_path = "/download/1111.jpg"
# Force RGB so grayscale / RGBA files don't break the 3-channel image processor.
image = Image.open(image_path).convert("RGB")

# # you can specify the revision tag if you don't want the timm dependency
processor = DetrImageProcessor.from_pretrained("/data/models/detr-resnet-50-train")
# Batch of two identical images -> pixel_values of shape (2, 3, H, W).
inputs = processor(images=[image, image], return_tensors="pt")
# print(inputs.keys())

# "no_timm" revision uses the pure-transformers ResNet backbone (no timm dependency).
model = DetrForObjectDetection.from_pretrained(pretrained_model_name_or_path="/data/models/detr-resnet-50",
                                               revision="no_timm")
# print("pretrain model loading finish")
#
# configuration = DetrConfig()
# # configuration.num_labels = 1000
# # print(model)
# ##################################
# new_model = DetrForObjectDetection(configuration)
# # Get the state_dicts of both models
# state_dict_src = model.state_dict()
# # print("原始model ")
# for k, _ in state_dict_src.items():
#     print(k)
#
# state_dict_tgt = new_model.state_dict()
# # Since the number of classes differs, drop the final classification head and load the remaining parameters.
# params_to_load = {k: v for k, v in state_dict_src.items() if not k.startswith('class_labels_classifier')}
# # params_to_load = {k: v for k, v in state_dict_src.items()}
# # Update the target model's state_dict
# state_dict_tgt.update(params_to_load)
# # Load the updated state_dict into the target model
# new_model.load_state_dict(state_dict_tgt)
#
# dataset_train, dataset_val = dataset_load.dataset_load()
# print("11111111111111111111111111111111", inputs.keys())
# print(inputs['pixel_values'].shape)
# print(inputs['pixel_mask'])
# """
# torch.Size([1, 3, 800, 1066])
# torch.Size([1, 800, 1066])
# """
#
# # print(dataset_train[0])
# # print(dataset_val[0])
# # print(dataset_train[0][0].shape)
# print(dataset_train[0][1].keys())
#
# new_data = dataset_train[0]
# inputs = {
#     "pixel_values": torch.stack([new_data[0], new_data[0]], dim=0),
#     "pixel_mask": torch.stack([new_data[1]["masks"], new_data[1]["masks"]])
# }
# print(111,inputs['pixel_values'].shape)
# print(222,inputs['pixel_mask'].shape)
# outputs = new_model(**inputs)
# print(outputs)
#
# sampler_train = torch.utils.data.RandomSampler(dataset_train)
# sampler_val = torch.utils.data.SequentialSampler(dataset_val)
#
# batch_sampler_train = torch.utils.data.BatchSampler(
#     sampler_train, 2, drop_last=True)
#
# import util.misc as utils
# from torch.utils.data import DataLoader, DistributedSampler
#
# data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
#                                collate_fn=utils.collate_fn, num_workers=1)
# data_loader_val = DataLoader(dataset_val, 2, sampler=sampler_val,
#                              drop_last=False, collate_fn=utils.collate_fn, num_workers=1)
# for id,data in enumerate(data_loader_train):
#     print(id,data)

# DETR expects one annotation dict per image in the batch. Boxes are in
# normalized (cx, cy, w, h) format; class labels index into the model's label map.
annotation = {
    'size': torch.LongTensor([800, 1066]),
    'image_id': torch.LongTensor([0]),
    'class_labels': torch.LongTensor([0, 2]),
    'boxes': torch.FloatTensor([[0.5955, 0.5811, 0.2202, 0.3561],
                                [0.5955, 0.5811, 0.2202, 0.3561]]),
    # area / iscrowd must have one entry per box (2 boxes -> 2 entries);
    # the originals had only one, inconsistent with 'boxes'.
    'area': torch.FloatTensor([3681.5083, 3681.5083]),
    'iscrowd': torch.LongTensor([0, 0]),
    'orig_size': torch.LongTensor([1536, 2048]),
}
# pixel_values has batch size 2 (two images were fed to the processor), so the
# labels list must contain TWO dicts — one per image. Supplying a single dict
# breaks DETR's loss (the Hungarian matcher / cardinality loss zip targets
# against the batch dimension).
labels = [annotation, {k: v.clone() for k, v in annotation.items()}]

print(inputs['pixel_values'].shape)
print(inputs['pixel_mask'].shape)
# labels is a list of dicts, one per batch element
print(labels[0]['boxes'].shape)
"""
torch.Size([2, 3, 800, 1066])
torch.Size([2, 800, 1066])
torch.Size([2, 4])
"""
inputs.update({
    "labels": labels
})


# Forward pass: passing labels makes the model also return the training loss.
outputs = model(**inputs)
"""
torch.Size([1, 100, 92])
torch.Size([1, 100, 4])
torch.Size([1, 100, 256])
torch.Size([1, 850, 256])
"""
print(outputs['loss'])


# target_sizes = torch.tensor([image.size[::-1]])
# results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.9)[0]

# for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
#     box = [round(i, 2) for i in box.tolist()]
#     print(
#             f"Detected {model.config.id2label[label.item()]} with confidence "
#             f"{round(score.item(), 3)} at location {box}"
#     )
##########################
# Plotting
# def display_results(detections, image):
#     # Visualization of the detection results on the input image
#     plt.imshow(image)
#
#     ax = plt.gca()
#     for detection in detections:
#         # Drawing bounding boxes and displaying labels with confidence
#         box = detection["box"]
#         rect = patches.Rectangle((box[0], box[1]), box[2] - box[0], box[3] - box[1],
#                                  linewidth=2, edgecolor='r', facecolor='none')
#         ax.add_patch(rect)
#
#         plt.text(box[0], box[1], f"{detection['label']} {detection['confidence']}%",
#                  bbox=dict(facecolor='red', alpha=0.5))
#
#     plt.show()