import os
import mindspore
from mindspore import Model, context
import ipywidgets as wgs

# Notebook UI: a device-selection dropdown and an output area for its messages.
dropdown = wgs.Dropdown(description='Select Device', options=['Click to choose', "CPU", "Ascend NPU"])
output = wgs.Output()


def changeDevice(change):
    """Dropdown observer callback: configure the MindSpore backend for the
    selected device and report success/failure inside the output widget.

    Args:
        change: ipywidgets change event; ``change.new`` is the selected option.
    """
    output.clear_output()
    token = change.new
    with output:
        # The options are mutually exclusive, so use an elif chain
        # (the original re-tested every branch with independent ifs).
        if token == 'Click to choose':
            print('请选择运行环境')
        elif token == 'CPU':
            print('请确认当前环境为CPU\n==================')
            try:
                context.set_context(mode=context.GRAPH_MODE, device_target="CPU", save_graphs=False)
                print('已检查，确认环境为CPU')
            except ValueError:
                print('错误！当前是系统硬件不是CPU,请重新选择合适的硬件设备')
        elif token == "Ascend NPU":
            print('请确认当前环境为Ascend NPU\n===================')
            try:
                # Bug fix: MindSpore accepts only 'CPU'/'GPU'/'Ascend' as
                # device_target; the original passed 'Ascend NPU', which made
                # this branch always raise ValueError.
                context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', save_graphs=False)
                print('当前环境为Ascend NPU')
            except ValueError:
                print("错误！当前是系统硬件不是Ascend NPU，请重新选择合适的硬件设备")


# dropdown.observe(changeDevice,names='value')

# Reproducibility and DCGAN hyperparameters.
mindspore.set_seed(1)  # fix the global RNG seed
data_root = './data'   # root directory of the training images
workers = 4            # parallel workers for dataset loading
batch_size = 128
image_size = 64        # images are resized/center-cropped to 64x64
nc = 3                 # number of image channels (RGB)
nz = 100               # length of the latent vector fed to the generator
ngf = 64               # base feature-map count in the generator
ndf = 64               # base feature-map count in the discriminator
num_epochs = 1
lr = 0.0002            # Adam learning rate
beta1 = 0.5            # Adam beta1
# One-shot context setup driven by the dropdown value at the moment this cell
# runs. NOTE(review): the observer above is commented out, so dropdown.value is
# whatever was selected before this cell executed.
if dropdown.value == "CPU":
    print("选择的环境是：" + dropdown.value)
    print(222)
    try:
        context.set_context(mode=context.GRAPH_MODE, device_target='CPU', save_graphs=False)
        print('CPU环境设置成功')
    except ValueError:
        print('错误！当前是系统硬件不支持CPU.请返回上一步重新选择合适的硬件设备')
elif dropdown.value == 'Ascend NPU':
    print('选择的环境是：' + dropdown.value)
    device_id = 0
    try:
        context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', save_graphs=False)
        # moxing is the Huawei ModelArts/OBS SDK; presumably this branch only
        # runs on ModelArts. NOTE(review): an ImportError here is NOT caught by
        # the except ValueError below — TODO confirm that is intended.
        import moxing
        import argparse
        import os

        parser = argparse.ArgumentParser()
        parser.add_argument('--data_url', required=False, default='minist/', help='Location of data.')
        args, unknown = parser.parse_known_args()
        # Copy the dataset from OBS (s3://...) to local storage when needed.
        if args.data_url.startswith('s3'):
            moxing.file.copy_parallel(src_url=args.data_url, dst_url='minist/')
            data_path = 'minist/'

        else:
            data_path = os.path.abspath(args.data_url)
        print('Ascend NPU环境设置成功！')
    except ValueError:
        print('错误，当前是系统硬件不支持Ascend NPU。请返回上一步重新选择合适的硬件设备')

import os
import requests
import time
import zipfile


def download_and_unzip(url, path):
    """Download a zip archive from ``url`` into ``path`` and extract it there.

    Downloads to ``<path>/data.zip`` with a streamed, authenticated GET while
    printing progress, then extracts the archive into ``path``.

    Args:
        url: Archive URL.
        path: Destination directory (created if missing).
    """
    # os.makedirs with exist_ok replaces the racy exists()+mkdir pair.
    os.makedirs(path, exist_ok=True)
    file_path = os.path.join(path, 'data.zip')
    start = time.time()
    # SECURITY NOTE(review): credentials are hard-coded in source; move them to
    # environment variables or a secrets store.
    user, password = 'tyx_neu', 'Sportlab307'
    chunk_size = 1024
    size = 0
    try:
        # timeout so a dead server cannot hang forever; the with-block
        # guarantees the connection is released (the original never closed it).
        with requests.get(url, auth=(user, password), stream=True, timeout=30) as resp:
            if resp.status_code == 200:
                # Read headers only after confirming success; .get() avoids the
                # KeyError the original risked on a missing content-length.
                content_size = int(resp.headers.get('content-length', 0))
                print('Start download,[File size]:{size:.2f}MB'.format(size=content_size / chunk_size / 1024))
                with open(file_path, 'wb') as file:
                    for data in resp.iter_content(chunk_size=chunk_size):
                        file.write(data)
                        size += len(data)
                        if content_size:
                            print('\r' + '[下载进度]：%s%.2f%%' % (
                                '>' * int(size * 50 / content_size), float(size / content_size * 100)), end=' ')
        end = time.time()
        print('\n' + 'download completed!,times:%.2f秒' % (end - start))
    except (requests.RequestException, ValueError):
        # The original caught only ValueError, which never covers network
        # failures raised by requests.
        print('Error!')

    # extractall in a context manager replaces the manual member loop and
    # guarantees the zip handle is closed.
    with zipfile.ZipFile(file_path) as zip_file:
        zip_file.extractall(path)
    print('successfully unzip download dataset from website')


# Dataset archive hosted on OpenI; downloaded and extracted into ./dataset.
url = "https://openi.pcl.ac.cn/attachments/427e823f-ab52-45a7-9f43-98371c7d5b1e?type=0"
# download_and_unzip(url, os.path.join(os.getcwd(), '/'))
download_and_unzip(url, os.path.join(os.getcwd(), 'dataset'))

import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import numpy as np


def create_dataset_imagenet(data_path, num_parallel_workers=None):
    """Build the training dataset: images paired with fresh latent codes.

    Each image is resized/center-cropped to ``image_size``, converted to CHW
    float32 in [0, 1], and paired with a standard-normal latent vector of
    shape (nz, 1, 1). Returns the dataset batched by ``batch_size``.
    """
    dataset = ds.ImageFolderDataset(
        data_path,
        num_parallel_workers=num_parallel_workers,
        shuffle=True,
        decode=True,
    )

    def attach_latent(img):
        # Normalize pixels to [0, 1] and draw a fresh N(0, 1) latent code.
        return (img / 255).astype('float32'), np.random.normal(size=(nz, 1, 1)).astype('float32')

    pipeline = [
        vision.Resize(image_size),
        vision.CenterCrop(image_size),
        vision.HWC2CHW(),
        attach_latent,
    ]

    dataset = dataset.map(
        input_columns='image',
        num_parallel_workers=num_parallel_workers,
        operations=pipeline,
        output_columns=['image', 'latent_code'],
        column_order=['image', 'latent_code'],
    )
    return dataset.batch(batch_size)


# Build the training dataset and report its number of batches.
data = create_dataset_imagenet(data_root, num_parallel_workers=workers)
size = data.get_dataset_size()
print(size)

import matplotlib.pyplot as plt

# Preview the first 30 images of one batch in a 3x10 grid (CHW -> HWC for imshow).
data_iter = next(data.create_dict_iterator(output_numpy=True))
plt.figure(figsize=(10, 3), dpi=140)
for i, image in enumerate(data_iter['image'][:30], 1):
    plt.subplot(3, 10, i)
    plt.axis('off')
    plt.imshow(image.transpose(1, 2, 0))
plt.show()

from mindspore import nn
from mindspore.common.initializer import Normal
from mindspore import load_checkpoint


def conv_t(in_channels, out_channels, kernel_size, stride=1, padding=0, pad_mode='pad'):
    """Transposed 2-D convolution with weights drawn from N(0, 0.02)."""
    return nn.Conv2dTranspose(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        weight_init=Normal(mean=0, sigma=0.02),
        pad_mode=pad_mode,
    )


def bn(num_features):
    """BatchNorm2d layer whose gamma is initialized from N(1, 0.02)."""
    init = Normal(mean=1, sigma=0.02)
    return nn.BatchNorm2d(num_features=num_features, gamma_init=init)


class Generator(nn.Cell):
    """DCGAN generator: upsamples an (nz, 1, 1) latent code to an (nc, 64, 64) image.

    Five transposed-convolution stages; all but the last are followed by batch
    norm and ReLU, and the output is squashed to [-1, 1] with Tanh.
    """

    def __init__(self):
        super(Generator, self).__init__()
        self.generator = nn.SequentialCell()
        # (nz, 1, 1) -> (ngf*8, 4, 4)
        self.generator.append(conv_t(nz, ngf * 8, 4, 1, 0))
        self.generator.append(bn(ngf * 8))
        self.generator.append(nn.ReLU())
        # (ngf*8, 4, 4) -> (ngf*4, 8, 8)
        self.generator.append(conv_t(ngf * 8, ngf * 4, 4, 2, 1))
        self.generator.append(bn(ngf * 4))
        self.generator.append(nn.ReLU())
        # (ngf*4, 8, 8) -> (ngf*2, 16, 16)
        self.generator.append(conv_t(ngf * 4, ngf * 2, 4, 2, 1))
        self.generator.append(bn(ngf * 2))
        self.generator.append(nn.ReLU())
        # (ngf*2, 16, 16) -> (ngf, 32, 32)
        self.generator.append(conv_t(ngf * 2, ngf, 4, 2, 1))
        self.generator.append(bn(ngf))
        self.generator.append(nn.ReLU())
        # (ngf, 32, 32) -> (nc, 64, 64), squashed to [-1, 1]
        self.generator.append(conv_t(ngf, nc, 4, 2, 1))
        self.generator.append(nn.Tanh())

    def construct(self, x):
        # Bug fix: the original returned Generator(x), which tried to
        # re-instantiate the class (a TypeError, since __init__ takes no
        # arguments) instead of running the layer stack on x.
        return self.generator(x)


# Instantiate the generator and restore pretrained weights from checkpoint.
netG = Generator()
param_dict_G = load_checkpoint('./Net/Generator.ckpt', netG)
print(netG)

from mindspore import nn
from mindspore.common.initializer import Normal
from mindspore import load_checkpoint


def conv(in_channels, out_channels, kernel_size, stride=1, padding=0, pad_mode='pad'):
    """Bias-free 2-D convolution with weights drawn from N(0, 0.02)."""
    init = Normal(mean=0, sigma=0.02)
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        weight_init=init,
        has_bias=False,
        pad_mode=pad_mode,
    )


class Discriminator(nn.Cell):
    """DCGAN discriminator: maps an (nc, 64, 64) image to a real/fake score in (0, 1).

    A stack of strided convolutions with LeakyReLU activations (batch norm on
    all but the first), finished by a 4x4 convolution and Sigmoid.
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        layers = nn.SequentialCell()
        # First stage: no batch norm.
        layers.append(conv(nc, ndf, 4, 2, 1))
        layers.append(nn.LeakyReLU(0.2))
        # Three downsampling stages, each doubling the channel count.
        for mult in (1, 2, 4):
            layers.append(conv(ndf * mult, ndf * mult * 2, 4, 2, 1))
            layers.append(bn(ndf * mult * 2))
            layers.append(nn.LeakyReLU(0.2))
        # Final 4x4 convolution to a single logit, squashed by Sigmoid.
        layers.append(conv(ndf * 8, 1, 4, 1))
        layers.append(nn.Sigmoid())
        self.discriminator = layers

    def construct(self, x):
        return self.discriminator(x)


# Instantiate the discriminator and restore pretrained weights from checkpoint.
netD = Discriminator()
parm_dict_D = load_checkpoint('./Net/Discriminator.ckpt', netD)
print(netD)

from mindspore import Tensor
from mindspore import dtype as mstype

# Binary cross-entropy loss, shared by both networks' loss cells below.
criterion = nn.BCELoss(reduction='mean')
np.random.seed(1)
# Fixed latent batch (4 samples) reused to visualize generator progress.
fixed_noise = Tensor(np.random.randn(4, nz, 1, 1), dtype=mstype.float32)

# Separate Adam optimizers for the discriminator and the generator.
optimizerD = nn.Adam(netD.trainable_params(), learning_rate=lr, beta1=beta1)
optimizerG = nn.Adam(netG.trainable_params(), learning_rate=lr, beta1=beta1)

from mindspore import ops


class WithLossCellG(nn.Cell):
    """Generator loss cell: BCE between D(G(z)) and all-ones labels.

    The generator is rewarded when the discriminator scores its output as real.
    """

    def __init__(self, netD, netG, loss_fn):
        super(WithLossCellG, self).__init__(auto_prefix=True)
        self.netD = netD
        self.netG = netG
        self.loss_fn = loss_fn

    def construct(self, latent_code):
        generated = self.netG(latent_code)
        prediction = self.netD(generated)
        # Target label 1 ("real") for every generated sample.
        ones = ops.OnesLike()(prediction)
        return self.loss_fn(prediction, ones)


class WithLossCellD(nn.Cell):
    """Discriminator loss cell: BCE on real images (label 1) plus BCE on
    generated images (label 0)."""

    def __init__(self, netD, netG, loss_fn):
        super(WithLossCellD, self).__init__(auto_prefix=True)
        self.netD = netD
        self.netG = netG
        self.loss_fn = loss_fn

    def construct(self, real_data, latent_code):
        # Real branch: the discriminator should output 1 on real images.
        real_out = self.netD(real_data)
        loss_real = self.loss_fn(real_out, ops.OnesLike()(real_out))

        # Fake branch: detach the generator output so only the discriminator's
        # parameters receive gradients from this loss.
        generated = ops.stop_gradient(self.netG(latent_code))
        fake_out = self.netD(generated)
        loss_fake = self.loss_fn(fake_out, ops.ZerosLike()(fake_out))
        return loss_real + loss_fake


class DCGAN(nn.Cell):
    """Combined train cell: runs one discriminator step and one generator step
    per call and returns both mean losses."""

    def __init__(self, myTrainOneStepCellForD, myTrainOneStepCellForG):
        super(DCGAN, self).__init__(auto_prefix=True)
        self.myTrainOneStepCellForD = myTrainOneStepCellForD
        self.myTrainOneStepCellForG = myTrainOneStepCellForG

    def construct(self, real_data, latent_code):
        # Discriminator update first, then the generator update.
        d_out = self.myTrainOneStepCellForD(real_data, latent_code)
        g_out = self.myTrainOneStepCellForG(latent_code)
        return d_out.view(-1).mean(), g_out.view(-1).mean()


# Attach loss cells to the networks and wrap each in a single-step trainer.
netD_with_criterion = WithLossCellD(netD, netG, criterion)
netG_with_criterion = WithLossCellG(netD, netG, criterion)
myTrainOneStepCellD = nn.TrainOneStepCell(netD_with_criterion, optimizerD)
myTrainOneStepCellG = nn.TrainOneStepCell(netG_with_criterion, optimizerG)

from mindspore import ops
from mindspore import save_checkpoint

# Combined train cell; set_train() switches the networks into training mode.
dcgan = DCGAN(myTrainOneStepCellD, myTrainOneStepCellG)
dcgan.set_train()
data_loader = data.create_dict_iterator(output_numpy=True, num_epochs=num_epochs)
G_losses = []  # per-iteration generator losses
D_losses = []  # per-iteration discriminator losses
image_list = []  # generator snapshots on fixed_noise, one per iteration
print('starting training loop...')

# Main training loop: one D step and one G step per batch, with periodic
# logging and a generator snapshot on the fixed noise each iteration.
for epoch in range(num_epochs):
    for i, d in enumerate(data_loader):
        # Bug fix: the original wrapped the batch in an extra list
        # (Tensor([d['image']])), adding a spurious leading dimension to the
        # already-batched image tensor.
        real_data = Tensor(d['image'])
        latent_code = Tensor(d['latent_code'])
        netD_loss, netG_loss = dcgan(real_data, latent_code)

        # Log every 50 iterations and on the final batch of the epoch.
        if i % 50 == 0 or i == size - 1:
            print('[%2d/%d][%3d/%d] Loss_D:%7.4f loss_G:%7.4f' % (
                epoch + 1, num_epochs, i + 1, size, netD_loss.asnumpy(), netG_loss.asnumpy()
            ))
        D_losses.append(netD_loss.asnumpy())
        G_losses.append(netG_loss.asnumpy())

        # Snapshot the generator's output on fixed_noise (NCHW -> NHWC for plotting).
        img = netG(fixed_noise)
        image_list.append(img.transpose(0, 2, 3, 1).asnumpy())

# Persist the trained weights for later reuse.
save_checkpoint(netG, 'Generator_F.ckpt')
save_checkpoint(netD, 'Discriminator_F.ckpt')

# Plot generator and discriminator loss curves over all iterations.
plt.figure(figsize=(10, 5))
plt.title('Generator and Discriminator Loss During Training')
plt.plot(G_losses, label="G", color='blue')
plt.plot(D_losses, label='D', color='orange')
plt.xlabel('iterations')
plt.ylabel('Loss')
plt.legend()
plt.show()

# Reload the final generator weights and render its output on the fixed noise.
parm_dict = load_checkpoint('./Generator_F.ckpt', netG)
image64 = netG(fixed_noise).transpose(0, 2, 3, 1).asnumpy()
fig = plt.figure(figsize=(8, 3), dpi=120)
images = []
# fixed_noise holds only 4 samples, so build rows of up to 8 images and skip
# empty slices (the original assumed 24 images and would crash concatenating
# the empty second row).
for i in range(3):
    row = image64[i * 8:(i + 1) * 8]
    if len(row):
        images.append(np.concatenate(row, axis=1))
img = np.clip(np.concatenate(images, axis=0), 0, 1)
plt.axis('off')
# Bug fix: the original called plt.imshow(image) — a stale loop variable from
# the earlier preview cell; the assembled grid is in `img`.
plt.imshow(img)
plt.show()
