from dataProcess import trainDataset, testDataset
from logisticModel import model, nn
from mindspore import value_and_grad
import mindspore

# Hyperparameters
epochs = 500           # number of full passes over the training set
learning_rate = 0.01   # step size handed to the Adam optimizer
# Adam optimizer over all trainable parameters of the imported model
optimizer = nn.Adam(model.trainable_params(), learning_rate=learning_rate)
# Binary cross-entropy loss; BCELoss expects probabilities in [0, 1] —
# presumably the model's output layer applies a sigmoid (defined in logisticModel).
loss_fn = nn.BCELoss()


def forwardFun(X, Y):
    """Forward pass: return the scalar BCE loss and the batch predictions.

    Returns (cost, y_pred). The predictions are returned as an auxiliary
    value so that `value_and_grad(..., has_aux=True)` differentiates only
    the loss (the first output).
    """
    predictions = model(X).squeeze()
    # Cast the labels to the prediction dtype before computing the loss
    targets = Y.astype(predictions.dtype)
    loss_value = loss_fn(predictions, targets).squeeze()
    return loss_value, predictions

grad_fn = value_and_grad(forwardFun, None, optimizer.parameters, has_aux=True)
def trainLoop():
    """Run one training epoch and print the epoch accuracy and average loss."""
    model.set_train()
    running_loss = 0.0
    sample_count = 0
    correct = 0.0  # keep as float so the accuracy division is not integer math
    for batch_x, batch_y in trainDataset.create_tuple_iterator():
        batch_y = batch_y.squeeze()
        (batch_loss, batch_pred), grads = grad_fn(batch_x, batch_y)
        # Let the optimizer apply the gradients to the parameters.
        # (Manual alternative, kept for reference:
        #   model.W -= learning_rate * grads[0]
        #   model.b -= learning_rate * grads[1])
        optimizer(grads)

        running_loss += batch_loss
        # Threshold the sigmoid outputs at 0.5 and count matches with the labels
        correct += ((batch_pred >= 0.5) == batch_y.astype(mindspore.bool_)).sum()
        sample_count += len(batch_x)

    # One loss value was accumulated per batch, so divide by the batch count
    loss_avg = running_loss / trainDataset.get_dataset_size()
    acc_avg = correct / sample_count

    print(f"Train: \n Accuracy: {100 * acc_avg}%, Avg loss: {loss_avg} \n")

def testLoop():
    """Evaluate the model on the test set; print accuracy and average loss.

    Fix: call `forwardFun` directly instead of `grad_fn`. The original
    computed a full backward pass here and discarded the gradients —
    evaluation needs only the forward loss and predictions.
    """
    model.set_train(False)
    loss_sum = 0.0
    total, acc_sum = 0, 0.0   # acc_sum as float so accuracy is not integer math
    for data, label in testDataset.create_tuple_iterator():
        label = label.squeeze()
        # Forward pass only — no gradient computation during evaluation
        loss, pred = forwardFun(data, label)

        loss_sum += loss
        # Threshold predictions at 0.5 and count matches with the boolean labels
        acc_sum += ((pred >= 0.5) == (label.astype(mindspore.bool_))).sum()
        total += len(data)

    # NOTE(review): get_dataset_size() presumably returns the number of
    # batches, matching the one loss accumulated per batch — confirm.
    loss_avg = loss_sum / testDataset.get_dataset_size()
    acc_avg = acc_sum / total

    print(f"Test: \n Accuracy: {100 * acc_avg}%, Avg loss: {loss_avg} \n")


# Main driver: for each epoch, run one pass of training followed by a
# full evaluation on the test set.
print("Training......")
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    trainLoop()
    testLoop()
print("Done!")