import os
import sys

import numpy as np
import mindspore as ms
from mindspore import context, Model
import mindspore.dataset as ds
import mindspore.nn as nn
from mindvision.engine.callback import LossMonitor
from mindspore.train.callback import SummaryCollector
from mindspore.train.callback._callback import Callback

from models.Loss import LossForMultiLabel, NetWithLossCell
from models.ONet import ONet
from utils.data import GetDatasetGenerator


class Acc_Callback(Callback):
    """Callback that reports classification accuracy on the training set
    at the end of every epoch.

    Only samples whose label is >= 0 (neg=0 / pos=1) are scored; negative
    labels mark samples used for the bbox/landmark heads and are skipped.
    """

    def __init__(self, net, train_dataset):
        super(Acc_Callback, self).__init__()
        self.net = net                      # network returning (class_out, bbox, landmark)
        self.train_dataset = train_dataset  # batched dataset yielding dicts with 'img'/'label'

    def accuracy(self, class_out, label):
        """Return the fraction of valid (label >= 0) samples whose argmax
        class prediction matches the label.

        class_out: Tensor of per-class scores, shape (batch, num_classes) —
            TODO confirm against ONet's output.
        label: Tensor of integer labels; values < 0 are excluded.
        """
        class_out = class_out.asnumpy()
        label = label.asnumpy()
        label = np.squeeze(label)
        # Keep only neg (0) and pos (1) samples; part/landmark samples carry
        # negative labels and must not count toward classification accuracy.
        cond = np.greater_equal(label, np.zeros(label.shape))
        picked = np.where(cond)
        valid_label = label[picked]
        # BUG FIX: a batch may contain no valid classification samples;
        # previously this divided by zero.
        if valid_label.shape[0] == 0:
            return 0.0
        valid_class_out = class_out[picked]
        acc = np.sum(np.argmax(valid_class_out, 1)
                     == valid_label, dtype='float')
        return acc / valid_label.shape[0]

    def epoch_end(self, run_context):
        """Iterate the whole training dataset and print the mean per-batch
        accuracy. Runs a full forward pass, so this is expensive."""
        print('start calculate acc...')
        batch_acc_list = []
        for data in self.train_dataset.create_dict_iterator():
            img = data['img']
            label = data['label']
            # Only the classification head is needed here.
            class_out, _, _ = self.net(img)
            batch_acc_list.append(self.accuracy(class_out, label))
        # BUG FIX: guard against an empty iterator (mean of [] is nan).
        if not batch_acc_list:
            print('train dataset acc : no batches')
            return
        acc = np.array(batch_acc_list).mean()
        print(f'train dataset acc : {acc}')


def cosine_lr(base_lr, decay_steps, total_steps):
    """Build a per-step cosine-decay learning-rate schedule.

    The rate follows base_lr * 0.5 * (1 + cos(pi * t / decay_steps)) for
    t = 0..decay_steps, then holds the final value for any remaining steps.

    Returns a float32 numpy array of length ``total_steps``.
    """
    # Clamp the step index so the schedule flattens after decay_steps.
    step_idx = np.minimum(np.arange(total_steps), decay_steps)
    schedule = base_lr * 0.5 * (1.0 + np.cos(np.pi * step_idx / decay_steps))
    return schedule.astype(np.float32)


def print_trainable_params_count(network):
    """Print and return the total number of trainable parameter elements.

    network: any object exposing ``trainable_params()`` returning parameters
        with a ``.data.shape`` attribute (e.g. a MindSpore Cell).

    Returns the element count (also printed), so callers can use the value
    programmatically; previously the function only printed it.
    """
    # np.prod over each parameter's shape gives its element count;
    # force an int accumulator so the printed value has no numpy dtype quirks.
    trainable_params_count = sum(
        int(np.prod(param.data.shape)) for param in network.trainable_params())
    print("trainable_params_count:" + str(trainable_params_count))
    return trainable_params_count


if __name__ == "__main__":
    sys.path.append("../")

    # 训练参数值
    data_path = '../dataset/48/all_data'
    batch_size = 384
    epoch_num = 10
    model_path = '../infer_models'

    # 获取数据
    dataset_generator = GetDatasetGenerator(data_path)
    train_dataset = ds.GeneratorDataset(list(dataset_generator), column_names=[
        "img", "label", "bbox", "landmark"])
    print("dataset done")
    train_dataset = train_dataset.batch(
        batch_size=batch_size)
    print("shuffle down")

    context.set_context(device_target='GPU', mode=context.GRAPH_MODE)

    steps = train_dataset.get_dataset_size()
    print("steps=", steps)

    onet = ONet()
    print_trainable_params_count(onet)
    # 定义多标签损失函数
    loss_fn = LossForMultiLabel()
    # 获取学习率衰减函数
    lr = cosine_lr(0.001, steps * epoch_num, steps * epoch_num)

    net_with_loss = NetWithLossCell(onet, loss_fn)
    opt = nn.Adam(params=onet.trainable_params(),
                  learning_rate=lr, weight_decay=1e-4)

    model = Model(network=net_with_loss, loss_fn=None, optimizer=opt)
    summary_collector = SummaryCollector(
        summary_dir='../summary_dir', collect_freq=1)
    model.train(epoch=epoch_num, train_dataset=train_dataset,
                callbacks=[LossMonitor(lr_init=lr.tolist(), per_print_times=100),
                           # Acc_Callback(onet, train_dataset),
                           ],
                dataset_sink_mode=False)

    # 保存模型
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    ms.save_checkpoint(onet, os.path.join(model_path, 'ONet.ckpt'))
