import os
import sys
from datetime import datetime
import mindspore as ms
from mindspore import context, Model
import mindspore.dataset as ds
import mindspore.nn as nn
from mindvision.engine.callback import LossMonitor
from mindspore.train.callback import SummaryCollector
from mindspore.train.callback._callback import Callback
import numpy as np
from models.Loss1 import NetWithLossCell,LossForMultiLabel
from models.PNet1 import PNet
from utils.data import CustomDataset

def print_trainable_params_count(network):
    """Print and return the total number of scalar values in *network*'s
    trainable parameters.

    Args:
        network: object exposing ``trainable_params()`` returning an
            iterable of parameters, each with a ``data.shape`` attribute
            (e.g. a MindSpore ``nn.Cell``).

    Returns:
        int: the total element count across all trainable parameters
        (also printed to stdout, matching the original behavior).
    """
    # Element count of one parameter is the product of its shape dims;
    # sum over all trainable parameters. (Replaces the index-based loop.)
    trainable_params_count = int(sum(
        np.prod(param.data.shape) for param in network.trainable_params()
    ))
    print("trainable_params_count:" + str(trainable_params_count))
    return trainable_params_count

def cosine_lr(base_lr, decay_steps, total_steps):
    """Build a per-step cosine-annealed learning-rate schedule.

    The rate follows half a cosine period from *base_lr* down to 0 over the
    first *decay_steps* steps, then holds its final value for any remaining
    steps up to *total_steps*.

    Args:
        base_lr (float): initial learning rate.
        decay_steps (int): number of steps over which the rate decays.
        total_steps (int): total length of the returned schedule.

    Returns:
        numpy.ndarray: float32 array of length ``total_steps``.
    """
    schedule = [
        base_lr * 0.5 * (1 + np.cos(np.pi * min(step, decay_steps) / decay_steps))
        for step in range(total_steps)
    ]
    return np.array(schedule, dtype=np.float32)

if __name__ == '__main__':
    sys.path.append("../")
    # Training hyper-parameters.
    data_path = '../dataset/12/all_data'
    batch_size = 384
    epoch_num = 10
    model_path = '../infer_models'

    # Build the dataset pipeline from the custom loader.
    train_loader = CustomDataset(data_path)
    train_dataset = ds.GeneratorDataset(list(train_loader), column_names=["img", "label", "bbox", "landmark"])
    train_dataset = train_dataset.batch(batch_size=batch_size)
    context.set_context(device_target='CPU', mode=context.GRAPH_MODE)
    steps = train_dataset.get_dataset_size()
    print("steps=", steps)
    # Create the PNet model.
    pnet = PNet()
    print_trainable_params_count(pnet)
    # Cosine-annealed learning-rate schedule spanning the whole run.
    lr = cosine_lr(0.001, steps * epoch_num, steps * epoch_num)
    # Wrap the network with its multi-label loss; Model() then needs no
    # separate loss_fn.
    loss_final = LossForMultiLabel()
    net_with_loss = NetWithLossCell(pnet, loss_final)
    opt = nn.Adam(params=pnet.trainable_params(),
                  learning_rate=lr, weight_decay=1e-4)
    # Start training.
    model = Model(network=net_with_loss, loss_fn=None, optimizer=opt)
    summary_collector = SummaryCollector(
        summary_dir='../summary_dir', collect_freq=1)
    # BUG FIX: summary_collector was instantiated but never passed to
    # train(), so no summaries were ever written; it now runs as a callback.
    model.train(epoch=epoch_num, train_dataset=train_dataset,
                callbacks=[LossMonitor(lr_init=lr.tolist(), per_print_times=100),
                           summary_collector],
                dataset_sink_mode=False)
    # Persist the trained weights.
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    ms.save_checkpoint(pnet, os.path.join(model_path, 'PNet.ckpt'))
