import torch
import torchvision

import os
import cv2
import numpy as np

from dataset import Crack

class MyLRASSP(torch.nn.Module):
    """LR-ASPP segmentation model (MobileNetV3-Large backbone) adapted to 2 classes.

    Loads the COCO-with-VOC-labels pretrained weights, then replaces both
    1x1 classifier heads so the network predicts 2 output channels
    (presumably background vs. crack — confirm against the Crack dataset masks)
    instead of the original 21 VOC classes.
    """

    def __init__(self):
        super().__init__()
        # Use the enum-class alias directly. The original
        # `COCO_WITH_VOC_LABELS_V1.DEFAULT` reached DEFAULT *through a member*,
        # which relies on deprecated Enum member->member attribute access
        # (DeprecationWarning on Python 3.11, removed in 3.12).
        weights = torchvision.models.segmentation.LRASPP_MobileNet_V3_Large_Weights.DEFAULT
        self.lrassp = torchvision.models.segmentation.lraspp_mobilenet_v3_large(weights=weights)
        # Swap the two classifier convolutions for 2-channel output heads.
        # in_channels must match the LR-ASPP internals: 40 for the low-level
        # feature branch, 128 for the ASPP branch.
        self.lrassp.classifier.low_classifier = torch.nn.Conv2d(
            in_channels=40, out_channels=2, kernel_size=(1, 1), stride=(1, 1))
        self.lrassp.classifier.high_classifier = torch.nn.Conv2d(
            in_channels=128, out_channels=2, kernel_size=(1, 1), stride=(1, 1))

    def forward(self, imgs):
        """Run segmentation on a batch of images.

        Args:
            imgs: input batch tensor (N, 3, H, W) — assumed normalized as the
                pretrained weights expect; TODO confirm against the dataset.

        Returns:
            The torchvision result dict; key 'out' holds the (N, 2, H, W) logits.
        """
        result = self.lrassp(imgs)
        return result


if __name__ == '__main__':

    model = MyLRASSP()

    dataset = Crack(r'./data/train/imgs', r'./data/train/masks')
    dataset_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=4, shuffle=True)

    # Smoke test: push one batch through the network and print the output
    # shape, then stop. eval() alone does not disable autograd, so wrap the
    # forward pass in no_grad() to avoid building a useless graph and
    # wasting memory during inference.
    model.eval()
    with torch.no_grad():
        for imgs, labels in dataset_loader:
            result = model(imgs)
            print(result['out'].shape)
            break