import time
import paddle
from dataLoad.readTrainData import TrainDataset
from nets.mainNet import YOLOv3
import numpy as np
# import paddle.distributed as dist


""" 参数设置"""
anchor_path = "model_data/anchors.txt"
anchor_mask_path = "model_data/anchor_mask.txt"
IGNORE_THRESH = .7
NUM_CLASSES = 7
class_path = 'model_data/your_classes.txt'

TRAINDIR = 'datasets/train'
VALIDDIR = 'datasets/val'
MAX_EPOCH = 100


def get_lr(base_lr=0.0001, lr_decay=0.1, boundaries=(10000, 20000)):
    """Build a piecewise-constant learning-rate schedule.

    The rate starts at ``base_lr`` and is multiplied by ``lr_decay`` each
    time a step boundary is crossed.

    Args:
        base_lr: Initial learning rate.
        lr_decay: Multiplicative decay factor applied at every boundary.
        boundaries: Iteration counts at which the rate decays.  The default
            reproduces the original hard-coded ``[10000, 20000]`` schedule,
            so existing callers are unaffected.

    Returns:
        A ``paddle.optimizer.lr.PiecewiseDecay`` scheduler.
    """
    bd = list(boundaries)
    # PiecewiseDecay needs one value per interval: len(boundaries) + 1 rates.
    lr = [base_lr * lr_decay ** i for i in range(len(bd) + 1)]
    return paddle.optimizer.lr.PiecewiseDecay(boundaries=bd, values=lr)


if __name__ == '__main__':
    # Local import is fine here: this is the script entry point only.
    # ast.literal_eval replaces eval() below — it accepts only Python
    # literals, so a tampered anchors file cannot execute arbitrary code.
    import ast

    # ---- Load anchor sizes ------------------------------------------------
    # File format: one comma-separated list of numbers, e.g. "10, 13, 16, ...".
    with open(anchor_path, 'r', encoding='utf-8') as f:
        ANCHORS = [ast.literal_eval(item) for item in f.read().split(', ')]

    # ---- Load anchor masks ------------------------------------------------
    # Same flat comma-separated format; grouped into triples, one group per
    # detection head (e.g. [[6, 7, 8], [3, 4, 5], [0, 1, 2]]).  A trailing
    # partial group is kept, matching the original chunking behaviour.
    with open(anchor_mask_path, 'r', encoding='utf-8') as f:
        mask_values = [ast.literal_eval(item) for item in f.read().split(', ')]
    ANCHOR_MASKS = [mask_values[i:i + 3] for i in range(0, len(mask_values), 3)]

    # ---- Datasets ---------------------------------------------------------
    train_dataset = TrainDataset(TRAINDIR, class_path, mode='train')
    valid_dataset = TrainDataset(VALIDDIR, class_path, mode='valid')

    # Comment this line out to train on CPU.
    # Multi-GPU launch: python -m paddle.distributed.launch train.py
    paddle.set_device("gpu")

    model = YOLOv3(num_classes=NUM_CLASSES)  # build the network

    # DataLoaders; tune batch_size / num_workers to the machine.
    train_loader = paddle.io.DataLoader(train_dataset, batch_size=10, shuffle=True,
                                        num_workers=0, drop_last=True,
                                        use_shared_memory=False)
    valid_loader = paddle.io.DataLoader(valid_dataset, batch_size=10, shuffle=False,
                                        num_workers=0, drop_last=False,
                                        use_shared_memory=False)

    # For multi-card training additionally wrap the model:
    # model = paddle.DataParallel(model)
    model.train()

    learning_rate = get_lr()
    # SGD with momentum + L2 weight decay — the standard YOLOv3 recipe.
    opt = paddle.optimizer.Momentum(
        learning_rate=learning_rate,
        momentum=0.9,
        weight_decay=paddle.regularizer.L2Decay(0.0005),
        parameters=model.parameters())

    for epoch in range(MAX_EPOCH):
        for batch_id, data in enumerate(train_loader()):
            img, gt_boxes, gt_labels, img_scale = data
            # Every ground-truth box gets confidence 1.0 (no mixup / soft labels).
            gt_scores = paddle.to_tensor(np.ones(gt_labels.shape).astype('float32'))
            img = paddle.to_tensor(img)
            gt_boxes = paddle.to_tensor(gt_boxes)
            gt_labels = paddle.to_tensor(gt_labels)

            outputs = model(img)  # forward pass -> [P0, P1, P2] feature maps
            loss = model.get_loss(outputs, gt_boxes, gt_labels, gtscore=gt_scores,
                                  anchors=ANCHORS,
                                  anchor_masks=ANCHOR_MASKS,
                                  ignore_thresh=IGNORE_THRESH,
                                  use_label_smooth=False)
            loss.backward()  # back-propagate gradients

            if batch_id % 10 == 0:
                timestring = time.strftime("%Y-%m-%d %H:%M:%S",
                                           time.localtime(time.time()))
                # float(loss) works for both 0-d and shape-[1] tensors,
                # unlike loss.numpy()[0], which fails on 0-d results.
                print(f'{timestring} [TRAIN]epoch {epoch}, iter {batch_id}, '
                      f'output loss: {float(loss):.4f}')

            opt.step()       # update parameters
            opt.clear_grad()

        # Checkpoint + validation every 5 epochs and on the final epoch.
        if (epoch % 5 == 0) or (epoch == MAX_EPOCH - 1):
            paddle.save(model.state_dict(), 'yolo_epoch{}'.format(epoch))

            model.eval()
            # Validation needs no autograd graph; no_grad saves memory/time.
            with paddle.no_grad():
                for i, data in enumerate(valid_loader()):
                    img, gt_boxes, gt_labels, img_scale = data
                    gt_scores = paddle.to_tensor(
                        np.ones(gt_labels.shape).astype('float32'))
                    img = paddle.to_tensor(img)
                    gt_boxes = paddle.to_tensor(gt_boxes)
                    gt_labels = paddle.to_tensor(gt_labels)
                    outputs = model(img)
                    loss = model.get_loss(outputs, gt_boxes, gt_labels,
                                          gtscore=gt_scores,
                                          anchors=ANCHORS,
                                          anchor_masks=ANCHOR_MASKS,
                                          ignore_thresh=IGNORE_THRESH,
                                          use_label_smooth=False)
                    timestring = time.strftime("%Y-%m-%d %H:%M:%S",
                                               time.localtime(time.time()))
                    print(f'{timestring} [VALID]epoch {epoch}, iter {i}, '
                          f'output loss: {float(loss):.4f}')

        # Back to training mode for the next epoch.
        model.train()
