import torch, os
from torch.utils.data import DataLoader
from transformers import DetrImageProcessor, DetrForObjectDetection
from transformers import DetrConfig
from dataset_create import CocoDetection

# Image processor: handles resize/normalize and (later) batch padding for DETR inputs.
processor = DetrImageProcessor.from_pretrained("/data/models/detr-resnet-50")
# COCO-format datasets; CocoDetection is project-local (dataset_create.py) and
# presumably applies `processor` per sample — TODO confirm against its implementation.
train_dataset = CocoDetection(img_folder='/data/datasets/coco/train2017', processor=processor)
val_dataset = CocoDetection(img_folder='/data/datasets/coco/val2017', processor=processor, train=False)


def collate_fn(batch):
    """Collate ragged detection samples into one padded batch.

    Each sample is a (pixel_values, target) pair; images differ in size, so
    the processor pads them to a common shape and returns a pixel mask.
    Targets stay a Python list (one dict per image), as DETR expects.
    """
    images = [sample[0] for sample in batch]
    targets = [sample[1] for sample in batch]
    padded = processor.pad(images, return_tensors="pt")
    return {
        'pixel_values': padded['pixel_values'],
        'pixel_mask': padded['pixel_mask'],
        'labels': targets,
    }


# DataLoaders: images vary in size, so collate_fn pads each batch.
train_dataloader = DataLoader(train_dataset, collate_fn=collate_fn, batch_size=4, shuffle=True)
val_dataloader = DataLoader(val_dataset, collate_fn=collate_fn, batch_size=2)
# Pull one batch up front as a smoke test of the data pipeline (reused below).
batch = next(iter(train_dataloader))

# Load only the configuration (not the pretrained weights): the model below is
# trained from scratch with randomly initialised parameters.
configuration = DetrConfig.from_pretrained(pretrained_model_name_or_path="/data/models/detr-resnet-50")
print("类别的数量", configuration.num_labels)  # "number of classes"
model = DetrForObjectDetection(configuration)

# Forward the smoke-test batch once; DETR returns a loss when labels are given.
outputs = model(**batch)
print(outputs['loss'])

# 优化器
# Optimizer: two parameter groups, following the standard DETR recipe —
# a lower learning rate for the backbone, a higher one for everything else.
# (The original code first built a plain Adam optimizer and immediately
# overwrote it with AdamW; that dead construction has been removed.)
param_dicts = [
    {"params": [p for n, p in model.named_parameters() if "backbone" not in n and p.requires_grad]},
    {
        "params": [p for n, p in model.named_parameters() if "backbone" in n and p.requires_grad],
        "lr": 1e-5,  # backbone learning rate
    },
]
optimizer = torch.optim.AdamW(param_dicts, lr=1e-4, weight_decay=1e-4)

# 训练和验证
def evaluate(epoch=5, log_step=100):
    """Run the model over the validation set and print the loss per batch.

    Args:
        epoch: unused; kept for backward compatibility with existing callers.
        log_step: unused; kept for backward compatibility with existing callers.

    Note: batch['labels'] is a *list* of per-image dicts (see collate_fn), so
    it cannot be moved to GPU with a single dict comprehension over the batch —
    the original `{k: v.cuda() for k, v in batch.items()}` crashed with
    AttributeError on CUDA machines. Each label tensor is moved individually.
    """
    model.eval()
    with torch.no_grad():
        for batch in val_dataloader:
            if torch.cuda.is_available():
                batch['pixel_values'] = batch['pixel_values'].cuda()
                batch['pixel_mask'] = batch['pixel_mask'].cuda()
                batch['labels'] = [
                    {key: value.cuda() for key, value in label.items()}
                    for label in batch['labels']
                ]
            output = model(**batch)
            print(output.loss)


def save_model(model, epoch, base_dir="D:/code/models/taibiao_detection/"):
    """Save a checkpoint under <base_dir>/epoch_<epoch>.

    Args:
        model: any object exposing a HuggingFace-style ``save_pretrained(path)``.
        epoch: training epoch index; used to name the checkpoint directory.
        base_dir: root checkpoint directory. The default preserves the original
            hard-coded path for backward compatibility; pass a different value
            to save elsewhere.
    """
    path = os.path.join(base_dir, 'epoch_{}'.format(epoch))
    # exist_ok makes the call idempotent, replacing the explicit exists-check.
    os.makedirs(path, exist_ok=True)
    model.save_pretrained(path)


# Move the model to GPU before training. nn.Module.cuda() moves parameters
# in place, so the optimizer groups built above still reference live tensors.
if torch.cuda.is_available():
    model = model.cuda()

def train(epoch=5, log_step=100):
    """Train the model, evaluating and checkpointing after every epoch.

    Args:
        epoch: total number of epochs to train for.
        log_step: print the loss every ``log_step`` optimisation steps.

    Uses the module-level ``model``, ``optimizer``, ``train_dataloader``,
    ``evaluate`` and ``save_model``.
    """
    global_step = 0
    # Loop variable renamed: the original reused `epoch`, shadowing the
    # parameter and making the code harder to follow.
    for epoch_idx in range(epoch):
        model.train()
        for batch in train_dataloader:
            if torch.cuda.is_available():
                batch['pixel_values'] = batch['pixel_values'].cuda()
                batch['pixel_mask'] = batch['pixel_mask'].cuda()
                # Labels are a list of per-image dicts of tensors. Move every
                # tensor instead of hard-coding the key list, so this keeps
                # working if the processor's target schema gains/loses keys.
                batch['labels'] = [
                    {key: value.cuda() for key, value in label.items()}
                    for label in batch['labels']
                ]

            optimizer.zero_grad()
            output = model(**batch)
            output.loss.backward()
            optimizer.step()
            if global_step % log_step == 0:
                print('epoch:{},step:{},loss:{}'.format(epoch_idx, global_step, output.loss.item()))
            global_step += 1

        print('evaluate epoch:{}'.format(epoch_idx))
        evaluate()
        save_model(model, epoch_idx)


if __name__ == '__main__':
    # Train the model (script entry point).
    train()
