import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.vision.c_transforms as CV
from mindspore import nn, Tensor, Model
from mindspore import dtype as mstype
from mindspore.train.callback import ModelCheckpoint
import numpy as np
import genDatabase

DATA_DIR = "./datasets/cifar-10-batches-bin"

# Network definition
class Net(nn.Cell):
    """LeNet-5-style CNN: two conv/pool stages followed by a 3-layer MLP head.

    The classifier head is sized for 32x32 inputs: two 5x5 'valid' convs with
    2x2 max-pools shrink the map 32 -> 28 -> 14 -> 10 -> 5, giving 16*5*5
    features at the flatten point.
    """

    def __init__(self, num_class=10, num_channel=3):
        super(Net, self).__init__()
        # Feature extractor.
        self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')
        self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.relu = nn.ReLU()
        self.flatten = nn.Flatten()
        # Classifier head.
        self.fc1 = nn.Dense(16 * 5 * 5, 120)
        self.fc2 = nn.Dense(120, 84)
        self.fc3 = nn.Dense(84, num_class)

    def construct(self, x):
        """Forward pass; returns raw (unsoftmaxed) class logits."""
        features = self.max_pool2d(self.relu(self.conv1(x)))
        features = self.max_pool2d(self.relu(self.conv2(features)))
        flat = self.flatten(features)
        hidden = self.relu(self.fc1(flat))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)

# Model instance and training hyper-parameters.
# NOTE(review): 2384 output classes — presumably matches the label space of
# genDatabase.getDataSet(); the commented-out CIFAR-10 path would need 10. Confirm.
net = Net(num_class=2384)
epochs = 1000
batch_size = 4
learning_rate = 1e-3

# Build the input pipeline.
# sampler = ds.SequentialSampler(num_samples=128)
# dataset = ds.Cifar10Dataset(DATA_DIR, sampler=sampler)

dataset = genDatabase.getDataSet()

# Debug pass: dump shape and label of every raw (pre-transform) sample.
for sample in dataset.create_dict_iterator():
    print("Image shape: {}".format(sample['image'].shape), ", Label: {}".format(sample['label']))

# Preprocessing: labels to int32; images to float32 and channel-first (CHW).
cast_label = C.TypeCast(mstype.int32)
cast_image = C.TypeCast(mstype.float32)
to_chw = CV.HWC2CHW()
dataset = dataset.map(operations=cast_label, input_columns="label")
dataset = dataset.map(operations=[cast_image, to_chw], input_columns="image")
dataset = dataset.batch(batch_size)

# Loss function and optimizer: plain SGD with sparse softmax cross-entropy
# (sparse=True because labels are int class indices, not one-hot vectors).
optim = nn.SGD(params=net.trainable_params(), learning_rate=learning_rate)
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

# Checkpoint callback so weights are periodically saved during training.
ckpt_cb = ModelCheckpoint()

# Train for the configured number of epochs.
model = Model(net, loss_fn=loss, optimizer=optim)
# BUG FIX: ckpt_cb was constructed but never passed to train(), so no
# checkpoint was ever written; register it via `callbacks`.
model.train(epoch=epochs, train_dataset=dataset, callbacks=[ckpt_cb], dataset_sink_mode=False)

# Quick sanity check on a single batch from the pipeline.
# NOTE(review): this scores only ONE batch of `batch_size` samples, so the
# printed "accuracy" is a spot check, not a real test-set metric.
data_item = dataset.create_dict_iterator()
data = next(data_item)

labels = data["label"].asnumpy()

# Forward pass; the class with the highest logit is the prediction.
output = model.predict(Tensor(data['image']))
predicted = np.argmax(output.asnumpy(), axis=1)

# Vectorized comparison replaces the manual right/wrong counting loop
# (same result: element-wise equality over the batch).
right = int(np.sum(predicted == labels))
wrong = len(labels) - right

print("accuracy:", right / (right + wrong) * 100, "%")
