import torch, os, wandb, time
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from utils.vocdataset import PascalVOCDataset, VOC_CLASSES
from utils.utils import calculate_ious, draw_bbox, do_nms
from torchvision.models.detection.faster_rcnn import fasterrcnn_resnet50_fpn, FasterRCNN_ResNet50_FPN_Weights, FastRCNNPredictor

def collate_fn(data):
    """Regroup a batch of (image, target) pairs into parallel tuples.

    Detection models take a sequence of variable-sized images plus a
    matching sequence of target dicts, so the batch is left as two
    tuples rather than being stacked into a single tensor.
    """
    images, targets = zip(*data)
    return images, targets

def _build_dataloaders(data_path, transform, batch_size=4):
    """Return (train_loader, val_loader) over Pascal VOC 2012 at `data_path`."""
    loaders = []
    for image_set in ("train", "val"):
        dataset = PascalVOCDataset(root=data_path, year="2012",
                                   image_set=image_set, transform=transform)
        loaders.append(DataLoader(dataset=dataset, batch_size=batch_size,
                                  collate_fn=collate_fn))
    return tuple(loaders)


def _build_model(num_classes, device):
    """Create a COCO-pretrained Faster R-CNN with its box-predictor head
    replaced for `num_classes` output classes, moved to `device`."""
    # Bug fix: passing both the deprecated `pretrained=` and the new
    # `weights=` argument is rejected by torchvision >= 0.13; the
    # `weights=` spelling alone is the supported one.
    model = fasterrcnn_resnet50_fpn(weights=FasterRCNN_ResNet50_FPN_Weights.DEFAULT)
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
    return model.to(device)


def _train(model, dataloader, optimizer, lr_scheduler, num_epochs):
    """Run the training loop, logging per-epoch mean losses to wandb."""
    model.train()
    print(f"Start to train")
    for epoch in range(num_epochs):
        print(f"---\n")
        print(f"#{epoch+1:02d}")
        epoch_loss_dicts = {
                'loss_classifier': [],
                'loss_box_reg': [],
                'loss_objectness': [],
                'loss_rpn_box_reg': []
            }
        for images, targets in dataloader:
            # In train mode the detection model returns a dict of losses.
            loss_dict = model(images, targets)
            losses = sum(loss_dict.values())

            for key, value in loss_dict.items():
                epoch_loss_dicts[key].append(float(value))

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
        lr_scheduler.step()

        # Epoch finished: log and print the mean of each loss component.
        for key, values in epoch_loss_dicts.items():
            # Guard against an empty dataloader (division by zero).
            mean_loss = sum(values) / len(values) if values else 0.0
            wandb.log({
                key: mean_loss,
                'epoch': epoch
                })
            print(f"{key}: {mean_loss * 100:.3f}%")

    print(f"\nTrain Ends.\n")


def _save_model(model):
    """Save the model weights under ./models with a timestamped name.

    Returns the path the weights were written to.
    """
    # Bug fix: the previous name used ':' separators, which are invalid
    # in Windows filenames; also create the target directory if missing.
    os.makedirs('models', exist_ok=True)
    model_path = os.path.join(
            'models',
            time.strftime('model-%Y-%m-%d-%H-%M-%S.pth')
        )
    torch.save(model.state_dict(), model_path)
    return model_path


def _validate(model, dataloader):
    """Evaluate box accuracy: the fraction of NMS-filtered predicted boxes
    whose IoU against a ground-truth box is >= 0.5. Also draws each
    prediction to an image file via draw_bbox."""
    box_accurate_num = 0
    data_num = 0
    model.eval()
    for images, targets in dataloader:
        with torch.no_grad():
            predictions = model(images)

            # Predicted boxes can be redundant/inaccurate; suppress them
            # against the ground truth (non-maximum suppression):
            # drop boxes with IoU < 0.5 vs gt, and among boxes with
            # IoU >= 0.5 keep only the best match per gt box.
            # Input: one batch of predictions:
            # [ {'labels': [...], 'boxes': [...], 'scores': [...]}, ... ]
            print(f"Length of predictions each batch before nms: {len(predictions[0]['labels'])}")
            predictions = do_nms(gt_targets=targets, pred_targets=predictions)
            # Bug fix: this message previously repeated "before nms".
            print(f"Length of predictions each batch after nms: {len(predictions[0]['labels'])}")

            for i, prediction in enumerate(predictions):
                pred_boxes = prediction['boxes']
                ious = calculate_ious(gt_boxes=targets[i]['boxes'], pred_boxes=pred_boxes)
                # Binarize: 1 for an accurate box (IoU >= 0.5), else 0.
                ious[ious <  0.5] = 0
                ious[ious >= 0.5] = 1

                box_accurate_num += float(sum(ious))
                print(f"length of ious = {len(ious)}")
                data_num += len(ious)

                # CHW -> HWC for drawing.
                img = images[i].permute(1, 2, 0)
                file_name = 'val' + targets[i]['path'].split('/')[-1]
                draw_bbox(img, prediction, file_name)

    print(f"data number = {data_num}")
    # Guard against division by zero when no boxes survived NMS.
    if data_num:
        print(f"Val: Box prediction accuracy = {box_accurate_num}/{data_num} = {box_accurate_num / data_num * 100:.2f}%")
    else:
        print("Val: Box prediction accuracy = 0/0 (no boxes to score)")


def main(learning_rate: float=5e-2):
    """Fine-tune a COCO-pretrained Faster R-CNN on Pascal VOC 2012,
    save the weights, and report validation box accuracy.

    Args:
        learning_rate: SGD learning rate for the fine-tuning run.
    """
    device = torch.device( 'cpu' )

    data_path: str = "./data/VOC2012"
    transform = transforms.Compose([ transforms.Resize((440, 500)) ])
    dataloader_train, dataloader_val = _build_dataloaders(data_path, transform)

    model = _build_model(len(VOC_CLASSES), device)

    # Only optimize trainable parameters; StepLR decays lr x0.1 every 3 epochs.
    params = [ p for p in model.parameters() if p.requires_grad ]
    optimizer = torch.optim.SGD(params=params, lr=learning_rate)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=3, gamma=0.1)

    _train(model, dataloader_train, optimizer, lr_scheduler, num_epochs=30)
    _save_model(model)
    _validate(model, dataloader_val)
if __name__ == "__main__":
    learning_rate = 8e-2
    wandb.init(
            project="pytorch-faster-rcnn",
            config={
                'learning_rate': learning_rate
                }
            )
    # Bug fix: the configured rate was logged to wandb, but training then
    # ran with a hard-coded 5e-2 — pass the same variable through so the
    # logged config matches the actual run.
    main(learning_rate=learning_rate)
