'''
 * @Author: Benjay·Shaw
 * @Date: 2024-10-30 17:48:20
 * @LastEditors: Benjay·Shaw
 * @LastEditTime: 2024-10-31 15:42:26
 * @Description: Network model wrapper (training / evaluation)
'''
import os
import paddle
import paddle.nn as nn
import math
from networks.unet_r50 import UNetR50
from utils.cawb import CosineAnnealingWarmbootingLR
from utils.data_process import ImageFolder
from utils.loss import *
from lightning import Fabric


class MyNet:
    """Training/evaluation wrapper around a segmentation network.

    Builds the network, optimizer, LR scheduler, loss, and train/val data
    loaders from ``args``, then hands model/optimizer/loaders to a Lightning
    ``Fabric`` instance for bf16 mixed-precision execution.

    NOTE(review): Lightning Fabric targets torch modules/optimizers; its use
    with paddle layers here is inherited from the original code — confirm
    compatibility in the target environment.
    """

    def __init__(self, args, evalmode=False):
        """Set up model, optimizer, scheduler, loss and data loaders.

        Args:
            args: namespace providing ``arch``, ``device``, ``lr_init``,
                ``batchsize_per_card``, ``use_multiple_GPU``, ``use_cosine_lr``,
                ``total_epoch``, ``cawb_steps``, ``lrf``, ``loss_function``,
                ``image_type``, ``label_type``, ``dataset_dir``,
                ``num_workers``.
            evalmode: when True, put the network in eval mode and skip the
                Fabric setup / data-loader attachment.
        """
        fabric = Fabric(accelerator='cuda', precision='bf16-mixed')
        fabric.launch()
        self.img = None
        self.mask = None
        self.img_id = None
        self.fabric = fabric
        if args.arch == 'UNetR50':
            self.net = UNetR50()
        os.environ['CUDA_VISIBLE_DEVICES'] = args.device
        # Paddle device strings are 'gpu'/'cpu' (torch's 'cuda' does not exist
        # here).  The original code called `.type` on this plain string, which
        # raised AttributeError; keep the string itself as the device type.
        device = 'gpu' if paddle.device.cuda.device_count() >= 1 else 'cpu'
        self.device_type = device
        if device == 'gpu':
            # NOTE(review): the original passed torch-style device_ids to
            # DataParallel; paddle.DataParallel has no such argument — GPU
            # visibility is governed by CUDA_VISIBLE_DEVICES (args.device)
            # set above, so args.use_multiple_GPU has no effect here.
            # Confirm the intended multi-GPU behaviour with the launch script.
            self.net = paddle.DataParallel(self.net.to(device))
            self.batch_size = (paddle.device.cuda.device_count()
                               * args.batchsize_per_card)
        else:
            self.net = paddle.DataParallel(self.net)
            self.batch_size = args.batchsize_per_card
        self.optimizer = paddle.optimizer.Adam(
            parameters=self.net.parameters(),
            learning_rate=args.lr_init,
            weight_decay=0.0)
        if args.use_cosine_lr:
            # Cosine decay from 1.0 down to args.lrf over total_epoch epochs.
            lf = lambda x, y=args.total_epoch: (
                (1 + math.cos(x * math.pi / y)) / 2) * (1 - args.lrf) + args.lrf
            self.scheduler = CosineAnnealingWarmbootingLR(
                self.optimizer, epochs=args.total_epoch, steps=args.cawb_steps,
                step_scale=0.7, lf=lf, batchs=self.batch_size,
                warmup_epoch=3, epoch_scale=4.0)
        if args.loss_function == 'BceDiceLoss':
            self.loss = BceDiceLoss(self.net)
        self.old_lr = args.lr_init
        # Image files are named "<sample-id><image_type>"; strip the extension
        # to recover the sample ids expected by ImageFolder.
        type_length = len(args.image_type)
        root_train = args.dataset_dir + '/train/'
        train_list = [name[:-type_length]
                      for name in os.listdir(root_train + 'imgs')]
        dataset_train = ImageFolder(train_list, root_train,
                                    args.image_type, args.label_type)
        train_loader = paddle.io.DataLoader(
            dataset=dataset_train, batch_size=self.batch_size,
            shuffle=True, num_workers=args.num_workers)
        root_val = args.dataset_dir + '/val/'
        val_list = [name[:-type_length]
                    for name in os.listdir(root_val + 'imgs')]
        dataset_val = ImageFolder(val_list, root_val,
                                  args.image_type, args.label_type)
        val_loader = paddle.io.DataLoader(
            dataset=dataset_val, batch_size=self.batch_size,
            shuffle=True, num_workers=args.num_workers)
        if evalmode:
            self.net.eval()
        else:
            self.net, self.optimizer = self.fabric.setup(
                self.net, self.optimizer)
            self.train_loader, self.val_loader = fabric.setup_dataloaders(
                train_loader, val_loader)
            self.net.train()

    def set_input(self, img_batch, mask_batch=None, img_id=None):
        """Stash the current batch (image, optional mask, optional ids)."""
        self.img = img_batch
        self.mask = mask_batch
        self.img_id = img_id

    def forward(self, volatile=False):
        """Move the current batch onto the compute device.

        ``volatile`` is retained for signature compatibility with the old
        torch ``Variable`` API (which does not exist in paddle); paddle
        tracks gradients via ``stop_gradient``, so it has no effect here.
        """
        if self.device_type == 'gpu':
            self.img = self.img.cuda(blocking=True)
            if self.mask is not None:
                self.mask = self.mask.cuda(blocking=True)

    def optimize(self):
        """Run one optimisation step on the current batch.

        Returns:
            The scalar loss value for this step.
        """
        self.forward()
        self.optimizer.clear_grad()
        pred = self.net(self.img)
        loss = self.loss(self.mask, pred)
        # Fabric handles loss scaling under mixed precision; the loss tensor
        # is passed positionally (Fabric.backward has no `grad_tensor` kwarg).
        self.fabric.backward(loss)
        self.optimizer.step()
        return loss.item()

    def save(self, path, save_model, is_best=False):
        """Save either the best network weights or a full checkpoint object.

        Args:
            path: destination file path.
            save_model: checkpoint object written when ``is_best`` is False.
            is_best: when True, save the network state_dict instead.
        """
        if is_best:
            paddle.save(obj=self.net.state_dict(), path=path)
        else:
            paddle.save(obj=save_model, path=path)

    def load(self, path, is_best=False):
        """Load best weights into the net, or return a raw checkpoint object.

        Args:
            path: source file path.
            is_best: when True, load the state_dict into ``self.net``;
                otherwise return the loaded object to the caller.
        """
        if is_best:
            self.net.set_state_dict(state_dict=paddle.load(path=str(path)))
        else:
            return paddle.load(path=str(path))

    def update_lr(self, lrf, my_log, factor=False):
        """Update the optimizer learning rate and log the change.

        Args:
            lrf: multiplicative factor on the current LR when ``factor`` is
                True; otherwise the new learning rate itself.  (The original
                code left ``new_lr`` unbound when ``factor`` was False,
                raising UnboundLocalError.)
            my_log: open file-like training-log object.
            factor: interpret ``lrf`` as a factor rather than an absolute LR.

        Returns:
            The new learning rate.
        """
        new_lr = self.old_lr * lrf if factor else lrf
        # Paddle optimizers expose set_lr(); they have no public param_groups.
        self.optimizer.set_lr(new_lr)
        my_log.write('update learning rate: %f -> %f' % (self.old_lr,
            new_lr) + '\n')
        print('update learning rate: %f -> %f' % (self.old_lr, new_lr) + '\n')
        self.old_lr = new_lr
        return new_lr
