#!/usr/bin/env python3
#-*- coding:utf8 -*-
# Created on 2020-06-13 16:10:28

import paddle
import paddle.fluid as fluid
import os
import numpy as np
import sys
curpath=os.path.abspath(os.curdir)
sys.path.append(curpath)
import dataloader
import model

# Informational banner: summarizes the paddle.fluid dygraph multi-GPU workflow
# that multi_gpu_train() below implements.
print('动态获取gpu卡号 device_id=fluid.dygraph.parallel.Env().dev_id')
print('动态获取gpu卡号 place=fluid.CUDAPlace(device_id)')
print('预处理: strategy=fluid.dygraph.parallel.prepare_context()')
print('预处理: model=MNIST(mnist)')
print('预处理: model=fluid.dygraph.parallel.DataParallel(model,strategy)')
# BUG FIX: snippet previously showed drop_last=true (lowercase), which is not
# valid Python — the keyword argument takes the boolean True.
print('不同的GPU加载不同的数据: valid_loader=paddle.batch(paddle.dataset.mnist.test(),batch_size=16,drop_last=True)')
print('不同的GPU加载不同的数据: valid_loader=fluid.contrib.reader.distributed_batch_reader(valid_loader)')
print('收集每批次训练数据的loss，并聚合参数的梯度: avg_loss=mnist.scale_loss(avg_loss)')
print('收集每批次训练数据的loss，并聚合参数的梯度: avg_loss.backward()')
# BUG FIX: the API name is apply_collective_grads(), not apply_collective_grades()
# (matches the actual call made inside multi_gpu_train()).
print('收集每批次训练数据的loss，并聚合参数的梯度: mnist.apply_collective_grads()')
print('cpu: with fluid.dygraph.guard(place=fluid.CPUPlace())')
print('GPU: with fluid.dygraph.guard(place=fluid.CUDAPlace(0)) 0是第一个GPU卡')
print('use_gpu=False')
print('place=fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()')
print('with fluid.dygraph.guard(place):')


def multi_gpu_train():
    """Train the MNIST model on multiple GPUs with PaddlePaddle dygraph data parallelism.

    Intended to be launched once per GPU via ``paddle.distributed.launch``.
    Each process picks its own device id from the environment, wraps the model
    with ``DataParallel``, and consumes a distinct shard of the training data
    through ``distributed_batch_reader``. Trained parameters are saved to
    'mnist-model' at the end.

    :returns: None
    """
    # Each launched process gets its own GPU id from the distributed environment.
    place = fluid.CUDAPlace(fluid.dygraph.parallel.Env().dev_id)
    with fluid.dygraph.guard(place):
        # Initialize the multi-GPU communication context, then wrap the model
        # so gradients can be synchronized across processes.
        strategy = fluid.dygraph.parallel.prepare_context()
        md = model.MNIST()
        md = fluid.dygraph.parallel.DataParallel(md, strategy)
        md.train()

        # Shard the reader so every GPU consumes different batches.
        train_data = dataloader.load_data('train')
        train_data = fluid.contrib.reader.distributed_batch_reader(train_data)

        optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.01, parameter_list=md.parameters())
        epoch_num = 5
        for epoch_id in range(epoch_num):
            for batch_id, data in enumerate(train_data()):
                image_data, label_data = data
                image = fluid.dygraph.to_variable(image_data)
                label = fluid.dygraph.to_variable(label_data)
                fw = md(image)
                loss = fluid.layers.cross_entropy(fw, label)
                avg_loss = fluid.layers.mean(loss)
                # BUG FIX: was `mnist.scale_loss(avg_loss)` — `mnist` is
                # undefined in this scope (NameError at runtime); the
                # DataParallel-wrapped model is bound to `md`.
                avg_loss = md.scale_loss(avg_loss)
                avg_loss.backward()
                # Aggregate gradients across all GPUs before the optimizer step.
                md.apply_collective_grads()

                print('# 最小化损失函数，清除本次训练的梯度')
                optimizer.minimize(avg_loss)
                md.clear_gradients()
                if batch_id % 100 == 0:
                    print('epoch_id:{},batch_id:{},loss:{}'.format(epoch_id, batch_id, avg_loss.numpy()))
        # NOTE(review): in a multi-process launch every worker writes to the
        # same path; conventionally only trainer 0 saves — confirm intent.
        fluid.save_dygraph(md.state_dict(), 'mnist-model')
# Usage notes: how to launch this script in distributed multi-GPU mode.
_usage_notes = (
    '运行方式:python -m paddle.distributed.launch --selected_gpus=0,1,2,3 --log_dir ./mylog train_multi_gpu.py',
    'paddle.distributed.launch：启动分布式运行。',
    'selected_gpus：设置使用的GPU的序号（需要是多GPU卡的机器，通过命令watch nvidia-smi查看GPU的序号）。',
    'log_dir：存放训练的log，若不设置，每个GPU上的训练信息都会打印到屏幕。',
    'train_multi_gpu.py：多GPU训练的程序，包含修改过的train_multi_gpu()函数。',
)
for _note in _usage_notes:
    print(_note)
