'''
 * @Author: Benjay·Shaw
 * @Date: 2024-10-31 17:07:51
 * @LastEditors: Benjay·Shaw
 * @LastEditTime: 2024-10-31 22:42:53
 * @Description: Network model wrapper (backbone construction, loss, optimizer)
'''
import os
import paddle
import cv2
import numpy as np
from networks.se_resnet50 import SE_Resnet50
from networks.sk_resnet50 import SK_Resnet50
from utils.loss import *


class MyNet:
    """Training/evaluation wrapper around the backbone networks.

    Builds the requested backbone, handles GPU / DataParallel placement,
    selects the loss function, and (in training mode) creates the Adam
    optimizer.
    """

    def __init__(self, args, eval_mode=False, pretrained=True):
        """Build the network and training machinery.

        Args:
            args: parsed CLI namespace; reads ``arch``, ``loss_function``,
                ``classes``, ``use_multiple_GPU``, ``batchsize_per_card``,
                ``device`` and ``lr_init``.
            eval_mode: when True, put the net in eval() mode and skip
                optimizer creation.
            pretrained: forwarded to SE_Resnet50 only.

        Raises:
            ValueError: if ``args.arch`` is not a supported backbone.
        """
        self.network = args.arch
        self.loss_function = args.loss_function
        # Fail fast on an unknown arch instead of the bare NameError the
        # original unguarded `net` reference would raise.
        if args.arch == 'SE_Resnet50':
            net = SE_Resnet50(nums_class=args.classes,
                              pretrained=pretrained, strides=(1, 2, 1, 2))
        elif args.arch == 'SK_Resnet50':
            net = SK_Resnet50(nums_class=args.classes)
        else:
            raise ValueError('unsupported arch: %s' % args.arch)
        self.net = net
        if args.use_multiple_GPU:
            self.net = paddle.DataParallel(layers=self.net.cuda(blocking=True))
            # Effective batch size scales with the number of visible GPUs.
            self.batch_size = (paddle.device.cuda.device_count()
                               * args.batchsize_per_card)
        else:
            os.environ['CUDA_VISIBLE_DEVICES'] = args.device
            if paddle.device.cuda.device_count() >= 1:
                self.net = paddle.DataParallel(layers=self.net)
            self.batch_size = args.batchsize_per_card
        # NOTE(review): only CACLoss is wired up here; any other value of
        # args.loss_function leaves self.loss unset and optimize() will
        # fail with AttributeError — confirm this is intended.
        if args.loss_function == 'CACLoss':
            self.loss = CACLoss()
        if eval_mode:
            self.net.eval()
        else:
            self.net.train()
            self.old_lr = args.lr_init
            self.optimizer = paddle.optimizer.Adam(
                parameters=self.net.parameters(),
                learning_rate=args.lr_init, weight_decay=0.0)

    def set_input(self, img_pre, img_now, label):
        """Stash one sample (previous frame, current frame, label)."""
        self.img_pre = img_pre
        self.img_now = img_now
        self.label = label

    def optimize(self):
        """Run one optimization step on the stored inputs.

        Returns:
            float: the scalar loss value for this step.
        """
        self.forward()
        # `set_to_zero=not True` in the original was an obfuscated False;
        # clear_grad is the current Paddle spelling of clear_gradients.
        self.optimizer.clear_grad(set_to_zero=False)
        label = self.label.to('float32')
        result = self.net.forward(self.img_pre, self.img_now)
        # Drop singleton dims and cast to int64 class indices for the loss.
        label = paddle.squeeze(x=label)
        label = label.astype(dtype='int64')
        loss = self.loss(self.img_pre, self.img_now, result, label)
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def forward(self, volatile=False):
        """Move the stored inputs to the GPU.

        NOTE(review): ``paddle.autograd.Variable(..., volatile=...)`` is a
        legacy (torch-0.3 style) API left over from conversion — verify it
        exists in the target Paddle version before relying on this method.
        """
        self.img_pre = paddle.autograd.Variable(
            self.img_pre.cuda(blocking=True), volatile=volatile)
        self.img_now = paddle.autograd.Variable(
            self.img_now.cuda(blocking=True), volatile=volatile)
        self.label = paddle.autograd.Variable(
            self.label.cuda(blocking=True), volatile=volatile)

    def save(self, path, save_model, is_best=False):
        """Persist either the current weights (is_best) or a caller-built
        checkpoint object (save_model)."""
        if is_best:
            paddle.save(obj=self.net.state_dict(), path=path)
        else:
            paddle.save(obj=save_model, path=path)

    def load(self, path, is_best=False):
        """Load best weights into the net in place, or return the raw
        checkpoint object when is_best is False."""
        if is_best:
            self.net.set_state_dict(state_dict=paddle.load(path=str(path)))
        else:
            return paddle.load(path=str(path))

    def update_lr(self, lrf, my_log, factor=False):
        """Scale the learning rate by ``lrf``, log the change, return new lr.

        Args:
            lrf: multiplicative factor applied to the current lr.
            my_log: open file-like object with a ``write`` method.
            factor: must be True; kept for signature compatibility.

        Raises:
            ValueError: when ``factor`` is falsy (the original referenced an
                unbound ``new_lr`` in that case, raising NameError).
        """
        if not factor:
            raise ValueError('update_lr requires factor=True')
        new_lr = self.old_lr * lrf
        # Paddle optimizers expose set_lr(); the torch-style `param_groups`
        # attribute the original iterated does not exist on paddle Adam.
        self.optimizer.set_lr(new_lr)
        update_lr_info = 'update learning rate: %f -> %f' % (self.old_lr,
            new_lr) + '\n'
        my_log.write(update_lr_info)
        print(update_lr_info)
        self.old_lr = new_lr
        return new_lr