# Build the dataset.
# Define the neural-network model.
# Define hyperparameters, the loss function, and the optimizer.
# Feed the dataset in for training and evaluation.

from mindspore import nn, ops, Tensor, value_and_grad
from mindspore.ops import CustomRegOp, DataType
from mindspore.common.initializer import Zero
import numpy as np
import mindspore as ms
from lighteyes import LightEyes
from load_dataset import train_data,val_data

# Training hyperparameters and shared state tensors.
epochs = 100
model=LightEyes()
# Discriminator matrix: marks which pixels were dropped by the custom loss op.
# Reset to zero after every backward pass (see backward_loss below).
indicator=ms.Tensor(shape=(1,1,712, 1072), dtype=ms.int32, init=Zero())
# Count tensor: holds the foreground-pixel count and the kept-background-pixel
# count computed during the forward pass of the custom loss op.
num_area=ms.Tensor(shape=(2), dtype=ms.int32, init=Zero())

# Registration info for the custom forward (loss) op on CPU:
# inputs are the network output, the label, the drop-indicator matrix and the
# per-area pixel counts; the single output is the scalar loss value.
custom_op_forward = CustomRegOp() \
    .input(0, "output", "required") \
    .input(1, "label", "required") \
    .input(2, "indicator", "required") \
    .input(3, "num_area", "required") \
    .output(0, "out","required") \
    .dtype_format(DataType.F32_NCHW,DataType.F32_NCHW,DataType.I32_NCHW,DataType.I32_Default,DataType.F32_Default) \
    .target("CPU") \
    .get_op_info()

# Registration info for the custom backward op on CPU: takes the predicted
# probability map, the drop indicator and the area counts, and produces the
# gradient w.r.t. the probability map (F32, NCHW).
custom_op_backward = CustomRegOp() \
    .input(0, "prob", "required") \
    .input(1, "indicator", "required") \
    .input(2, "num_area", "required") \
    .output(0, "grads","required") \
    .dtype_format(DataType.F32_NCHW,DataType.I32_NCHW,DataType.I32_Default,DataType.F32_NCHW) \
    .target("CPU") \
    .get_op_info()

# Registration info for the custom evaluation op (recall/precision) on CPU.
# NOTE(review): output 0 is named "output", the same as input 0 — confirm the
# registration framework tolerates the duplicate name.
custom_op_test = CustomRegOp() \
    .input(0, "output", "required") \
    .input(1, "l", "required") \
    .input(2, "is_visited", "required") \
    .output(0, "output","required") \
    .dtype_format(DataType.F32_NCHW,DataType.F32_NCHW,DataType.I32_NCHW,DataType.F32_Default) \
    .target("CPU") \
    .get_op_info()

#参数为正向的输入,正向的输出,输出梯度
#损失函数没有下一层,grads_output为None
# bprop arguments are, by convention: the forward inputs, the forward output,
# and the incoming output gradient. The loss has no downstream layer, so
# grads_output is None.
def backward_loss():
    """Build and return the custom bprop function for the loss op."""
    op_backward=ops.Custom("./loss_backward.so:CustomBackward", out_shape=[1,1,712,1072], out_dtype=ms.float32,
                           func_type="aot",reg_info=custom_op_backward)
    # Forward inputs (4): prob, label, indicator, num_area
    # Forward output: loss
    # Output gradient: None (loss is the last node)
    def custom_bpdrop(prob,label,indicator,num_area,loss,grads_output):
        # Compute the gradient w.r.t. prob from the saved indicator/counts.
        grads_in=op_backward(prob,indicator,num_area)
        # Reset the shared state so the next iteration starts from zeros.
        # NOTE(review): `x *= 0` on a MindSpore Tensor may rebind the LOCAL
        # name to a new tensor instead of zeroing the module-level one —
        # confirm this actually clears the global indicator/num_area.
        indicator*=0
        num_area*=0
        return grads_in
    return custom_bpdrop

# Forward-pass loss operator; its backward function is backward_loss().
op_train=ops.Custom("./loss_forward.so:CustomSigmoidRandomDrop", out_shape=[1], out_dtype=ms.float32, 
                    bprop=backward_loss(),func_type="aot",reg_info=custom_op_forward)
# Evaluation-metric operator implementing cal_RE_PR (returns [recall, precision]).
op_test=ops.Custom("./cal_RE_PR.so:CustomTest",out_shape=[2],out_dtype=ms.float32,func_type="aot",reg_info=custom_op_test)

#正向传播,获得损失值
def forward_fn(data,label):
    """Run the forward pass and return the scalar loss for one batch."""
    print("forward_fn")
    # The model yields (probability map, feature map); only prob feeds the loss.
    prob_map, _feature = model(data)
    batch_loss = op_train(prob_map, label, indicator, num_area)
    return batch_loss[0]

# Adam optimizer over all trainable parameters.
# NOTE(review): weight_decay=1.0 is unusually large for Adam — confirm intended.
optimizer =nn.Adam(model.trainable_params(),learning_rate=1e-4,weight_decay=1.0,use_lazy=False,use_offload=False)
# Gradient function: returns (loss, grads w.r.t. model weights only).
grad_fn=value_and_grad(forward_fn,grad_position=None,weights=model.trainable_params(),has_aux=False)

#对图像单步训练
def train_step(data,label):
    print("train_step")
    loss,grads=grad_fn(data,label)
    optimizer(grads)
    return loss

#对数据集训练
def train_loop(model, dataset):
    model.set_train()
    print("train loop")
    for (image, label) in enumerate(dataset.create_dict_iterator()):
        loss = train_step(label['image'], label['label'])
        print("loss: ")
        print(loss)

#测试仅需要模型以及数据集,不需要其他数据
#只需要获取到conv31的输出和此图像的标注图像即可
def test_loop(model, dataset):
    model.set_train(False)
    length=dataset.get_dataset_size()
    RE=0
    PR=0
    is_visited=ms.Tensor(shape=(1,1,1072, 712), dtype=ms.int32, init=Zero())#判别矩阵,判别哪些像素已经遍历过
    for (image, label) in enumerate(dataset.create_dict_iterator()):
        prob,output=model(label['image'])#训练后得到的特征图
        l=label['label']
        num_array=op_test(output,l,is_visited)
        is_visited*=0 #下一次训练时判别矩阵恢复初值
        r=num_array[0]
        p=num_array[1]
        RE+=r
        PR+=p
    RE/=length
    PR/=length
    F1_score=2*RE*PR/(RE+PR)
    return RE,PR,F1_score

# Main driver: train for `epochs` epochs, evaluating every 10th epoch
# (including epoch 1, since i starts at 0).
for i in range(epochs):
    print(f"Epoch {i+1}\n")
    train_loop(model, train_data)
    if i%10==0:
        RE,PR,F1_score=test_loop(model,val_data)
        print(f"Epoch{i+1},F1_score is {F1_score}")
    
print("Done!")
