import glob
import multiprocessing
import os

import mindspore as ms
from PIL import Image
from mindspore import nn
import mindspore.dataset as ds
from day2_ChangeFormerV6.dataset.CD_dataset import CDDataset
from day2_ChangeFormerV6.models.ChangeFormer import ChangeFormerV6
from day2_ChangeFormerV6.utils.custom_with_cell import CustomWithTrainCell
from day2_ChangeFormerV6.utils.loss import CrossEntropyWithLogits
from mindspore.common.initializer import One, Normal
from mindspore import context
from day2_ChangeFormerV6.dataset.cd_dataset_new import create_dataset_new
import numpy as np
from mindspore import ops
from mindspore import save_checkpoint

# Run in PyNative (eager) mode on CPU so the script can be stepped through and
# debugged; switch to GRAPH_MODE / "GPU"/"Ascend" for real training runs.
context.set_context(mode=ms.PYNATIVE_MODE,device_target="CPU")

from mindspore import dtype as mstype

def create_dataset(batch=16,
                   root_dir=r'E:\ChangeFormer模型迁移相关内容\迁移准备\LEVIR-CD-256',
                   split='val', img_size=256):
    """Build a batched MindSpore dataset over LEVIR-CD change-detection pairs.

    Generalized: the dataset root, split and image size were hard-coded; they
    are now parameters whose defaults reproduce the original behavior exactly.

    Args:
        batch: batch size of the returned dataset (default 16).
        root_dir: dataset root directory (original hard-coded path kept as
            the default for backward compatibility).
        split: split name handed to CDDataset (default 'val').
        img_size: square image side in pixels (default 256).

    Returns:
        A GeneratorDataset with columns ("img_A", "img_B", "label"),
        batched to `batch` samples.
    """
    # Instantiate the project's change-detection dataset wrapper.
    dataset_generator = CDDataset(root_dir=root_dir, img_size=img_size,
                                  split=split, is_train=True, label_transform='norm')

    # shuffle=False keeps sample order deterministic (useful for debugging).
    dataset = ds.GeneratorDataset(dataset_generator, ["img_A", "img_B", "label"], shuffle=False)
    dataset = dataset.batch(batch_size=batch)
    return dataset

def get_data(num):
    """Yield `num` synthetic (imgA, imgB, label) tensors for pipeline smoke tests.

    Each sample is a pair of normally-initialised float32 images plus a label
    tensor. NOTE(review): the label is created with dtype uint32 but a
    Normal() initializer — confirm this is what the loss function expects.
    """
    image_shape = (1, 3, 256, 256)
    label_shape = (1, 1, 256, 256)
    for _ in range(num):
        sample_a = ms.Tensor(shape=image_shape, dtype=mstype.float32, init=Normal())
        sample_b = ms.Tensor(shape=image_shape, dtype=mstype.float32, init=Normal())
        mask = ms.Tensor(shape=label_shape, dtype=mstype.uint32, init=Normal())
        yield sample_a, sample_b, mask


def get_imgset(root, mode, batch_size=2):
    """Yield (imgA, imgB, label) tensors for each image pair under `root`.

    Bug fixed: the original `yield` sat inside the inner `range(batch_size)`
    loop, so when `len(files)` was not a multiple of `batch_size` the wrapped
    index (`% len(files)`) re-yielded leading samples as duplicates. Each file
    is now visited exactly once — identical output at the actual call site
    (batch_size=1). The unused `ops.Concat` instance and the dead commented-out
    batching code were removed; batching is left to `dataset.batch(...)`
    downstream, as the caller already does.

    Args:
        root: dataset root containing `<mode>/A`, `<mode>/B`, `<mode>/label`.
        mode: split name, e.g. 'train' / 'val' / 'test'.
        batch_size: kept for interface compatibility; iteration is per-sample.

    Yields:
        imgA, imgB: float32 CHW tensors with pixel values scaled to [0, 1].
        label: integer tensor; input assumed {0, 255}, yielded as {0, 1}.
    """
    files_A = sorted(glob.glob(os.path.join(root, '%s/A' % mode) + '/*.*'))
    files_B = sorted(glob.glob(os.path.join(root, '%s/B' % mode) + '/*.*'))
    files_Label = sorted(glob.glob(os.path.join(root, '%s/label' % mode) + '/*.*'))

    for path_a, path_b, path_label in zip(files_A, files_B, files_Label):
        input_A = Image.open(path_a).convert('RGB')
        input_B = Image.open(path_b).convert('RGB')
        label_img = Image.open(path_label).convert('L')

        imgA = np.asarray(input_A).astype(np.float32)
        imgB = np.asarray(input_B).astype(np.float32)
        label = np.asarray(label_img).astype(np.uint8)

        # HWC -> CHW, scale pixels to [0, 1]; labels {0, 255} -> {0, 1}.
        imgA = ms.Tensor(imgA / 255.).transpose(-1, 0, 1)
        imgB = ms.Tensor(imgB / 255.).transpose(-1, 0, 1)
        label = ms.Tensor(label // 255)
        yield imgA, imgB, label


# Build the ChangeFormerV6 network and its Adam optimizer.
net = ChangeFormerV6(embed_dim=256)
optimizer = nn.Adam(net.trainable_params(), learning_rate=0.001, weight_decay=0.0005)


# Attach the cross-entropy loss via the project's custom wrapper cell.
loss_fun = CrossEntropyWithLogits()
net_with_loss = CustomWithTrainCell(net, loss_fun)


# Define the training network: wraps the loss cell and the optimizer so a
# single call runs forward, backward and a parameter update.
train_net = nn.TrainOneStepCell(net_with_loss, optimizer)
# Put the network into training mode.
train_net.set_train()

# For testing only.
# NOTE(review): list(...) materialises the ENTIRE dataset in memory before
# building the GeneratorDataset — fine for a small smoke-test set, but it
# will not scale. The dataset root is a hard-coded local Windows path.
# train_dataset = ds.GeneratorDataset(list(get_data(2)), column_names=['img_A', 'img_B', 'label'])
train_dataset = ds.GeneratorDataset(list(get_imgset(root=r'D:\git_test\Levir_sample', mode='val', batch_size=1)),
                                    column_names=['imgA', 'imgB', 'label'])
train_dataset = train_dataset.batch(1)
# train_dataset = create_dataset_new(batch=2)

step = 0
epochs = 2
# Number of batches per epoch, used for progress reporting.
steps = train_dataset.get_dataset_size()


# multiprocessing.freeze_support()

for epoch in range(epochs):
    # enumerate() restarts the step counter each epoch, so the
    # "step / steps" log can never exceed the per-epoch size (the
    # original kept one global counter, so epoch 2 printed values
    # larger than `steps`). Leftover debug prints removed.
    for step, d in enumerate(train_dataset.create_dict_iterator()):
        result = train_net(d["imgA"], d["imgB"], d["label"])
        print(f"Epoch: [{epoch} / {epochs}], "
              f"step: [{step} / {steps}], "
              f"loss: {result}")
    # Save once per epoch: the original saved after EVERY step, always
    # overwriting the same file, which wasted time for no extra safety.
    ms.save_checkpoint(net, './mynet.ckpt')
    print('finished save ckpt...')

