from paddleseg.datasets import Dataset
import paddleseg.transforms as T
import paddle
from paddleseg.models.losses import BCELoss
from paddleseg.core import train
from paddleseg.models import UNet, FCN, BiSeNetV2, DANet, FastSCNN, HarDNet
from paddleseg.models import DeepLabV3, PSPNet, ANN
from paddleseg.models.backbones.resnet_vd import ResNet50_vd


class MyModels():
    """Factory and trainer for several PaddleSeg segmentation models.

    The constructor builds the train/val datasets once; `get_models()`
    instantiates a model by name and `train_model()` runs the PaddleSeg
    training loop on it.
    """

    def __init__(self, num_classes=2, image_size=256, dataset_root='DataSet',
                 epochs=5, batch_size=32, lr=0.5, decay_steps=10,
                 num_samples=6141):
        """Configure datasets and hyper-parameters.

        Args:
            num_classes (int): number of segmentation classes.
            image_size (int): images are resized to (image_size, image_size).
            dataset_root (str): dataset root directory containing
                train_list.txt / val_list.txt.
            epochs (int): number of passes over the training set.
            batch_size (int): training batch size.
            lr (float): initial learning rate.
            decay_steps (int): decay_steps for the polynomial LR schedule.
            num_samples (int): number of training samples, used to convert
                epochs into iterations (defaults to the previously
                hard-coded 6141).
        """
        # Number of segmentation classes
        self.NUM_CLASSES = num_classes
        # Target image size (square)
        self.IMAGE_SIZE = image_size
        # Dataset root directory
        self.DATASET_ROOT = dataset_root
        # Number of training epochs
        self.EPOCHS = epochs
        # Batch size
        self.BATCH_SIZE = batch_size
        # Initial learning rate
        self.LR = lr
        # Steps for the polynomial LR decay schedule
        self.DECAY_STEPS = decay_steps
        # Training-set size, used to convert epochs -> iterations
        self.NUM_SAMPLES = num_samples

        # Loss configuration: PaddleSeg expects parallel 'types'/'coef' lists
        # (two entries because some models emit an auxiliary head output).
        self.LOSSES = {
            'types': [BCELoss()] * 2,
            'coef': [1] * 2,
        }

        # Training-set transforms
        self.train_transforms = [
            #T.RandomHorizontalFlip(),  # horizontal flip
            #T.RandomVerticalFlip(),  # vertical flip
            #T.RandomRotation(),  # random rotation
            #T.RandomScaleAspect(),  # random scale/aspect
            #T.RandomDistort(),  # random distortion
            T.Resize(target_size=(self.IMAGE_SIZE, self.IMAGE_SIZE)),  # resize to speed up training
            T.Normalize()  # normalize
        ]

        # Training dataset
        self.train_dataset = Dataset(
            transforms=self.train_transforms,
            dataset_root=self.DATASET_ROOT,
            num_classes=self.NUM_CLASSES,
            mode='train',
            train_path=self.DATASET_ROOT + '/train_list.txt',
            separator=' ',
        )

        # Validation-set transforms (no augmentation, just resize + normalize)
        self.val_transforms = [
            T.Resize(target_size=(self.IMAGE_SIZE, self.IMAGE_SIZE)),
            T.Normalize()
        ]

        # Validation dataset
        self.val_dataset = Dataset(
            transforms=self.val_transforms,
            dataset_root=self.DATASET_ROOT,
            num_classes=self.NUM_CLASSES,
            mode='val',
            val_path=self.DATASET_ROOT + '/val_list.txt',
            separator=' ',
        )

    def get_models(self, model_name):
        """Instantiate a segmentation model by name.

        Args:
            model_name (str): one of 'unet', 'fcn', 'bisenetv2', 'danet',
                'fastscnn', 'hardnet', 'deeplabv3', 'pspnet', 'ann'.

        Returns:
            A freshly constructed PaddleSeg model.

        Raises:
            ValueError: if `model_name` is not a supported key.
        """
        # Lazily-evaluated builders so only the requested model (and its
        # backbone) is constructed. Fixes the original bug where 'danet'
        # was built but never assigned, causing an UnboundLocalError.
        builders = {
            'unet': lambda: UNet(num_classes=self.NUM_CLASSES),
            'fcn': lambda: FCN(num_classes=self.NUM_CLASSES, backbone=ResNet50_vd()),
            'bisenetv2': lambda: BiSeNetV2(num_classes=self.NUM_CLASSES),
            'danet': lambda: DANet(num_classes=self.NUM_CLASSES),
            'fastscnn': lambda: FastSCNN(num_classes=self.NUM_CLASSES),
            'hardnet': lambda: HarDNet(num_classes=self.NUM_CLASSES),
            'deeplabv3': lambda: DeepLabV3(num_classes=self.NUM_CLASSES, backbone=ResNet50_vd()),  # supports ResNet50_vd
            'pspnet': lambda: PSPNet(num_classes=self.NUM_CLASSES, backbone=ResNet50_vd()),  # supports ResNet50
            'ann': lambda: ANN(num_classes=self.NUM_CLASSES, backbone=ResNet50_vd()),  # supports ResNet50
        }
        try:
            return builders[model_name]()
        except KeyError:
            raise ValueError(
                "unknown model name: %r; expected one of %s"
                % (model_name, sorted(builders))) from None

    def train_model(self, model, save_dir):
        """Train `model` with Adam + polynomial LR decay and save checkpoints.

        Args:
            model: a PaddleSeg model (e.g. from `get_models`).
            save_dir (str): directory for checkpoints and VisualDL logs.
        """
        # Use the configured DECAY_STEPS (the original hard-coded 10,
        # silently ignoring the constructor parameter).
        scheduler = paddle.optimizer.lr.PolynomialDecay(self.LR, self.DECAY_STEPS)
        adam = paddle.optimizer.Adam(scheduler, parameters=model.parameters())

        # Convert epochs to iterations from the training-set size.
        iters = self.EPOCHS * self.NUM_SAMPLES // self.BATCH_SIZE

        # Train; save 5 checkpoints across the run (at least every iter).
        train(
            model=model,
            train_dataset=self.train_dataset,
            val_dataset=self.val_dataset,
            optimizer=adam,
            save_dir=save_dir,
            iters=iters,
            batch_size=self.BATCH_SIZE,
            save_interval=max(1, iters // 5),
            log_iters=10,
            num_workers=0,
            losses=self.LOSSES,
            use_vdl=True)