import os
import torchvision



class CocoDetection(torchvision.datasets.CocoDetection):
    """COCO detection dataset that preprocesses samples with a DETR processor.

    Wraps ``torchvision.datasets.CocoDetection`` and converts each raw
    (PIL image, COCO annotation list) pair into the tensors DETR expects
    (resized/normalized ``pixel_values`` plus a DETR-format label dict).
    """

    # Default annotation files. These are absolute paths, independent of
    # img_folder. (The original code wrapped them in
    # os.path.join(img_folder, ...), which is a no-op: os.path.join discards
    # all preceding components when a later component is absolute.)
    TRAIN_ANN_FILE = "/data/datasets/coco/annotations/instances_train2017.json"
    VAL_ANN_FILE = "/data/datasets/coco/annotations/instances_val2017.json"

    def __init__(self, img_folder, processor, train=True, ann_file=None):
        """
        Args:
            img_folder: directory containing the COCO images.
            processor: a DETR image processor (e.g. ``DetrImageProcessor``)
                used to convert images/annotations to model inputs.
            train: select the train split annotations when True, else val.
            ann_file: optional explicit annotation JSON path; overrides the
                ``train`` flag when provided (backward-compatible default).
        """
        if ann_file is None:
            ann_file = self.TRAIN_ANN_FILE if train else self.VAL_ANN_FILE
        super().__init__(img_folder, ann_file)
        self.processor = processor

    def __getitem__(self, idx):
        """Return ``(pixel_values, target)`` preprocessed for DETR.

        Data augmentation, if desired, belongs here before the processor call.
        """
        # Read in PIL image and target in COCO format.
        img, target = super().__getitem__(idx)

        # Preprocess image and target: convert target to DETR format,
        # resize + normalize both image and boxes.
        image_id = self.ids[idx]
        target = {'image_id': image_id, 'annotations': target}
        encoding = self.processor(images=img, annotations=target, return_tensors="pt")
        pixel_values = encoding["pixel_values"].squeeze()  # remove batch dimension
        target = encoding["labels"][0]  # remove batch dimension

        return pixel_values, target


#
#
#
# import pytorch_lightning as pl
# from transformers import DetrForObjectDetection
# import torch
#
# class Detr(pl.LightningModule):
#      def __init__(self, lr, lr_backbone, weight_decay):
#          super().__init__()
#          # replace COCO classification head with custom head
#          # we specify the "no_timm" variant here to not rely on the timm library
#          # for the convolutional backbone
#          self.model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50",
#                                                              revision="no_timm",
#                                                              num_labels=len(id2label),
#                                                              ignore_mismatched_sizes=True)
#          # see https://github.com/PyTorchLightning/pytorch-lightning/pull/1896
#          self.lr = lr
#          self.lr_backbone = lr_backbone
#          self.weight_decay = weight_decay
#
#      def forward(self, pixel_values, pixel_mask):
#        outputs = self.model(pixel_values=pixel_values, pixel_mask=pixel_mask)
#
#        return outputs
#
#      def common_step(self, batch, batch_idx):
#        pixel_values = batch["pixel_values"]
#        pixel_mask = batch["pixel_mask"]
#        labels = [{k: v.to(self.device) for k, v in t.items()} for t in batch["labels"]]
#
#        outputs = self.model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels)
#
#        loss = outputs.loss
#        loss_dict = outputs.loss_dict
#
#        return loss, loss_dict
#
#      def training_step(self, batch, batch_idx):
#         loss, loss_dict = self.common_step(batch, batch_idx)
#         # logs metrics for each training_step,
#         # and the average across the epoch
#         self.log("training_loss", loss)
#         for k,v in loss_dict.items():
#           self.log("train_" + k, v.item())
#
#         return loss
#
#      def validation_step(self, batch, batch_idx):
#         loss, loss_dict = self.common_step(batch, batch_idx)
#         self.log("validation_loss", loss)
#         for k,v in loss_dict.items():
#           self.log("validation_" + k, v.item())
#
#         return loss
#
#      def configure_optimizers(self):
#         param_dicts = [
#               {"params": [p for n, p in self.named_parameters() if "backbone" not in n and p.requires_grad]},
#               {
#                   "params": [p for n, p in self.named_parameters() if "backbone" in n and p.requires_grad],
#                   "lr": self.lr_backbone,
#               },
#         ]
#         optimizer = torch.optim.AdamW(param_dicts, lr=self.lr,
#                                   weight_decay=self.weight_decay)
#
#         return optimizer
#
#      def train_dataloader(self):
#         return train_dataloader
#
#      def val_dataloader(self):
#         return val_dataloader
#
# model = Detr(lr=1e-4, lr_backbone=1e-5, weight_decay=1e-4)
# outputs = model(pixel_values=batch['pixel_values'], pixel_mask=batch['pixel_mask'])
# from pytorch_lightning import Trainer
# trainer = Trainer(max_steps=300, gradient_clip_val=0.1)
# trainer.fit(model)
#
# def main():
#
#     train_dataset = build_dataset(image_set='train', args=args)
#     val_dataset = build_dataset(image_set='val', args=args)
#
#     train_dataloader = DataLoader(train_dataset, collate_fn=collate_fn, batch_size=4, shuffle=True)
#     # val_dataloader = DataLoader(val_dataset, collate_fn=collate_fn, batch_size=2)
#     inputs = next(iter(train_dataloader))
#     # print(inputs)
#     print(inputs['pixel_values'].shape)
#     print(inputs['pixel_mask'].shape)
#     # labels is a list of dicts
#     print(inputs['labels'][0]['boxes'].shape)
#     """
#     print(inputs['pixel_values'].shape)
#     print(inputs['pixel_mask'].shape)
#     # labels is a list of dicts
#     print(labels[0]['boxes'].shape)
#     torch.Size([2, 3, 800, 1066])
#     torch.Size([2, 800, 1066])
#     torch.Size([2, 4])
#     """
#     # # print(dataset_val[0])
#     from transformers import DetrImageProcessor, DetrForObjectDetection, AutoModelForObjectDetection
#     model = DetrForObjectDetection.from_pretrained(pretrained_model_name_or_path="/data/models/detr-resnet-50",
#                                                    revision="no_timm")
#
#     outputs = model(**inputs)
#     print(outputs['loss'])
#
#
# if __name__ == '__main__':
#     main()
