from __future__ import division
from __future__ import print_function

import time
import argparse
import numpy as np
import random
import mindspore
from mindspore import context
from mindspore.context import ParallelMode #
import moxing as mox #
from dataset import create_dataset#
import os
import mindspore.dataset as ds
import mindspore.nn as nn
import mindspore.ops.operations as P
import mindspore.common.initializer as weight_init
from models import Discriminator, Generator
import mindspore.ops as ops
from mindspore import Tensor
from PIL import Image
from cell import GenTrainOneStepCell, DisTrainOneStepCell
from mindspore.train.serialization import save_checkpoint

# Device placement comes from the distributed launch environment (ModelArts / rank table).
device_id = int(os.getenv('DEVICE_ID') or 0)  # index of this device on the host; 0 when unset or empty
device_num = int(os.getenv('RANK_SIZE') or 1)  # total device count in the job; 1 when unset or empty

def get_device_num():
    """Return the number of devices in the job (``RANK_SIZE`` env var, default 1)."""
    return int(os.environ.get('RANK_SIZE', '1'))

if __name__ == '__main__':

    parser = argparse.ArgumentParser()

    # ModelArts cloud I/O locations (OBS URLs); both are mandatory.
    parser.add_argument('--data_url', required=True, default=None, help='Location of data.')
    parser.add_argument('--train_url', required=True, default=None, help='Location of training outputs.')

    parser.add_argument('--image_size', type=int, default=32, help='Size of image for discriminator input.')
    parser.add_argument('--initial_size', type=int, default=8, help='Initial size for generator.')
    parser.add_argument('--patch_size', type=int, default=4, help='Patch size for generated image.')
    parser.add_argument('--num_classes', type=int, default=1, help='Number of classes for discriminator.')
    parser.add_argument('--lr_gen', type=float, default=0.0001, help='Learning rate for generator.')
    parser.add_argument('--lr_dis', type=float, default=0.0001, help='Learning rate for discriminator.')
    parser.add_argument('--weight_decay', type=float, default=1e-3, help='Weight decay.')
    parser.add_argument('--latent_dim', type=int, default=1024, help='Latent dimension.')
    parser.add_argument('--n_critic', type=int, default=5, help='Discriminator updates per generator update.')
    parser.add_argument('--max_iter', type=int, default=500000, help='max_iter.')
    parser.add_argument('--gener_batch_size', type=int, default=16, help='Batch size for generator.')
    parser.add_argument('--dis_batch_size', type=int, default=8, help='Batch size for discriminator.')
    parser.add_argument('--epoch', type=int, default=200, help='Number of epoch.')
    parser.add_argument('--output_dir', type=str, default='checkpoint', help='Checkpoint.')
    parser.add_argument('--dim', type=int, default=384, help='Embedding dimension.')
    parser.add_argument('--img_name', type=str, default="img_name", help='Name of pictures file.')
    parser.add_argument('--optim', type=str, default="Adam", help='Choose your optimizer')
    parser.add_argument('--loss', type=str, default="wgangp_eps", help='Loss function')
    # Numeric defaults were previously written as strings ("1", "0.99"); argparse
    # does convert string defaults through `type`, but plain literals are clearer
    # and cannot silently break if `type` is ever removed.
    parser.add_argument('--phi', type=int, default=1, help='phi')
    parser.add_argument('--beta1', type=float, default=1e-5, help='beta1')
    parser.add_argument('--beta2', type=float, default=0.99, help='beta2')
    # NOTE(review): --lr_decay is declared type=str but defaults to bool True, so
    # any command-line value (even "False") becomes a truthy non-empty string.
    # Left as-is because its consumer is not visible in this file.
    parser.add_argument('--lr_decay', type=str, default=True, help='lr_decay')
    parser.add_argument('--diff_aug', type=str, default="translation,cutout,color", help='Data Augmentation')
    parser.add_argument("--device_target", type=str, default="GPU", help='The target hardware')
    parser.add_argument('--clamp_lower', type=float, default=-0.01, help='Lower bound for discriminator weight clipping.')
    parser.add_argument('--clamp_upper', type=float, default=0.01, help='Upper bound for discriminator weight clipping.')
    parser.add_argument('--experiment', default=None, help='Where to store samples and models')
    args_opt = parser.parse_args()

    # Samples and checkpoints go to the training-output location unless overridden.
    if args_opt.experiment is None:
        args_opt.experiment = args_opt.train_url
    # os.makedirs replaces the former os.system('mkdir {0}') call: it creates
    # missing parents, tolerates an existing directory, and is not vulnerable to
    # shell metacharacters in the path. The old call ignored failures (e.g. for
    # OBS-style URLs), so keep that best-effort behavior but report the problem.
    try:
        os.makedirs(args_opt.experiment, exist_ok=True)
    except OSError as err:
        print('Warning: could not create {0}: {1}'.format(args_opt.experiment, err))

    # adapt to cloud: data is staged from OBS onto local disk before training
    local_data_path = '/cache/data'

    # Device target is hard-coded to Ascend for the cloud job; the
    # --device_target argument is intentionally not consulted here.
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend", save_graphs=False)
    context.set_context(device_id=device_id)
    if device_num > 1:
        context.set_auto_parallel_context(device_num=device_num,
                                          parallel_mode=ParallelMode.DATA_PARALLEL,
                                          gradients_mean=True)
        # adapt to cloud: each rank stages data into its own subdirectory
        local_data_path = os.path.join(local_data_path, str(device_id))

    # adapt to cloud: download data from obs to local location
    print('Download data.')
    mox.file.copy_parallel(src_url=args_opt.data_url, dst_url=local_data_path)

    dataset = create_dataset(local_data_path, batchSize=args_opt.dis_batch_size, imageSize=32)

    # Fix RNG seeds so a run can be reproduced from the printed seed.
    # np.random is seeded too (the original left it unseeded even though every
    # latent-noise batch below is drawn from it).
    # NOTE(review): the seed is applied after create_dataset(); whether the
    # already-built pipeline's shuffling picks it up depends on MindSpore —
    # consider seeding before dataset construction.
    manualSeed = random.randint(1, 10000)
    print("Random Seed: ", manualSeed)
    random.seed(manualSeed)
    np.random.seed(manualSeed)
    ds.config.set_seed(manualSeed)

    def init_weight(net: Discriminator):
        """Initialize the weights of every ``nn.Dense`` cell in ``net``.

        Dense weights are drawn from TruncatedNormal(sigma=0.02); Dense biases,
        when present, are zero-filled. All other cell types are left untouched.
        """
        for _, cell in net.cells_and_names():
            if isinstance(cell, nn.Dense):
                cell.weight.set_data(weight_init.initializer(weight_init.TruncatedNormal(.02),
                                                             cell.weight.shape,
                                                             cell.weight.dtype))
                # The original re-checked isinstance(cell, nn.Dense) here, which
                # is always true inside this branch; only the bias check remains.
                if cell.bias is not None:
                    cell.bias.set_data(weight_init.initializer(weight_init.Constant(0),
                                                               cell.bias.shape,
                                                               cell.bias.dtype))


    def save_image(img, img_path, image_size=32, rows=2, columns=2, padding=2):
        """Denormalize a batch of images and save the first ``rows * columns``
        of them as a single padded grid image.

        Args:
            img: mindspore Tensor or numpy array. A Tensor is mapped from
                [-1, 1] NCHW to [0, 255] uint8 NHWC before tiling.
                NOTE(review): a numpy ``img`` is pasted as-is — it is assumed
                to already be uint8 NHWC; confirm against callers.
            img_path: destination file path.
            image_size: side length of each tile in pixels (default 32).
            rows: number of grid rows (default 2).
            columns: number of grid columns (default 2).
            padding: pixel gap between tiles and around the border (default 2).

        Raises:
            ValueError: if ``img`` is neither a Tensor nor a numpy array.
        """
        print("Shape of generated img is {}".format(img.shape))

        if isinstance(img, Tensor):
            # Map [-1, 1] -> [0, 255]: x * 127.5 + 127.5, then NCHW -> NHWC.
            img = ops.Mul()(img, 255 * 0.5)
            img = ops.Add()(img, 255 * 0.5)
            img = img.asnumpy().astype(np.uint8).transpose((0, 2, 3, 1))
        elif not isinstance(img, np.ndarray):
            raise ValueError("img should be Tensor or numpy array, but get {}".format(type(img)))

        # Blank canvas sized for the full grid, padding on every side of each tile.
        to_image = Image.new('RGB', (columns * image_size + padding * (columns + 1),
                                     rows * image_size + padding * (rows + 1)))
        idx = 0
        for y in range(1, rows + 1):
            for x in range(1, columns + 1):
                tile = Image.fromarray(img[idx])
                to_image.paste(tile, ((x - 1) * image_size + padding * x,
                                      (y - 1) * image_size + padding * y))
                idx += 1

        to_image.save(img_path)


    # Build the generator; architecture sizes are hard-coded for 32x32 output
    # (they mirror the CLI defaults instead of reading args_opt).
    generator= Generator(depth1=5, depth2=4, depth3=2, initial_size=8, dim=384, heads=4, mlp_ratio=4, drop_rate=0.5)

    # Initialize the discriminator; sizes likewise hard-coded to the defaults,
    # only the differentiable-augmentation policy comes from the CLI.
    discriminator = Discriminator(diff_aug = args_opt.diff_aug, image_size=32, patch_size=4, input_channel=3, num_classes=1,
                 dim=384, depth=7, heads=4, mlp_ratio=4,
                 drop_rate=0.)


    # TruncatedNormal / zero initialization for the discriminator's Dense layers.
    init_weight(discriminator)

    # NOTE(review): only 'Adam' and 'SGD' are handled; any other --optim value
    # leaves optimizerD/optimizerG unbound and the cell construction below
    # raises NameError.
    if args_opt.optim == 'Adam':
        optimizerD = nn.Adam(discriminator.trainable_params(), learning_rate=args_opt.lr_dis, beta1=args_opt.beta1, beta2=args_opt.beta2)
        optimizerG = nn.Adam(generator.trainable_params(), learning_rate=args_opt.lr_gen, beta1=args_opt.beta1, beta2=args_opt.beta2)

    elif args_opt.optim == 'SGD':
        optimizerD = nn.SGD(discriminator.trainable_params(), learning_rate=args_opt.lr_dis, momentum=0.9)
        optimizerG = nn.SGD(generator.trainable_params(), learning_rate=args_opt.lr_gen, momentum=0.9)

    # One-step training cells bundle forward, backward and optimizer update.
    # The discriminator cell receives the clamp bounds — presumably WGAN-style
    # weight clipping; confirm in cell.py.
    netG_train = GenTrainOneStepCell(generator, discriminator, optimizerG)
    netD_train = DisTrainOneStepCell(generator, discriminator, optimizerD, args_opt.clamp_lower, args_opt.clamp_upper)

    netG_train.set_train()
    netD_train.set_train()

    # Fixed latent batch reused every time samples are dumped, so the
    # fake_samples_*.png images are comparable across iterations.
    fixed_noise = Tensor(np.random.normal(0, 1, size=[args_opt.gener_batch_size, args_opt.latent_dim]), dtype=mindspore.dtype.float32)

    gen_iterations = 0  # total number of generator updates performed so far

    # Train
    for epoch in range(args_opt.epoch):
        data_iter = dataset.create_dict_iterator()
        length = dataset.get_dataset_size()  # batches per epoch
        i = 0  # batches consumed this epoch
        while i < length:
            ############################
            # (1) Update D network
            ###########################
            for p in discriminator.trainable_params():  # reset requires_grad
                p.requires_grad = True  # they are set to False below in netG update

            # WGAN schedule: train D heavily (25 steps) at the very start and
            # every 500 generator iterations, otherwise n_critic steps per G step.
            if gen_iterations < 25 or gen_iterations % 500 == 0:
                Diters = 25
            else:
                Diters = args_opt.n_critic
            j = 0
            # The `i < length` guard stops the D loop when the epoch's data is
            # exhausted, even if fewer than Diters steps were taken.
            while j < Diters and i < length:
                j += 1

                data = data_iter.__next__()
                i += 1

                # train with real and fake
                real = data['image']
                # Fresh noise sized to the real batch (the last batch of an
                # epoch may be smaller than dis_batch_size).
                noise = Tensor(np.random.normal(0, 1, size=[P.Shape()(real)[0], args_opt.latent_dim]), dtype=mindspore.dtype.float32)
                loss_D = netD_train(real, noise)

            ############################
            # (2) Update G network
            ###########################
            for p in discriminator.trainable_params():
                p.requires_grad = False  # to avoid computation

            noise = Tensor(np.random.normal(0, 1, size=[args_opt.gener_batch_size, args_opt.latent_dim]), dtype=mindspore.dtype.float32)
            loss_G = netG_train(noise)
            gen_iterations += 1

            # NOTE(review): indexing with [0] assumes the step cells return the
            # loss as (at least) a 1-element array — confirm against cell.py.
            print('[%d/%d][%d/%d][%d] Loss_D: %f Loss_G: %f'
                  % (epoch, args_opt.epoch, i, length, gen_iterations,
                     loss_D.asnumpy()[0], loss_G.asnumpy()[0]))

            # Periodically dump the most recent real batch and the fixed-noise
            # fakes for visual inspection (real_samples.png is overwritten).
            if gen_iterations % 100 == 0:
                fake = generator(fixed_noise)
                save_image(real, '{0}/real_samples.png'.format(args_opt.experiment))
                save_image(fake, '{0}/fake_samples_{1}.png'.format(args_opt.experiment, gen_iterations))

        # Checkpoint both networks at the end of every epoch.
        save_checkpoint(discriminator, '{0}/netD_epoch_{1}.ckpt'.format(args_opt.experiment, epoch))
        save_checkpoint(generator, '{0}/netG_epoch_{1}.ckpt'.format(args_opt.experiment, epoch))

    print("Train success!")