import numpy as np
from mindspore import Tensor, context
from mindspore import dtype as mstype
# Run every graph in static-graph mode on the CPU backend.
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

# A 0-d tensor built from a Python float.
x = Tensor(0.1)

# A tensor built from an existing NumPy array.
arr = np.array([1, 0, 1, 0])
x_np = Tensor(arr)


from mindspore import Tensor
from mindspore import set_seed
from mindspore import dtype as mstype
from mindspore.common.initializer import One, Normal

# Seed MindSpore's RNG so the Normal() initializer is reproducible.
set_seed(1)
# 2x2 tensors materialized from initializers: all-ones and normal-distributed.
tensor1 = Tensor(shape=(2, 2), dtype=mstype.float32, init=One())
tensor2 = Tensor(shape=(2, 2), dtype=mstype.float32, init=Normal())
print(tensor1)
print(tensor2)


from mindspore import ops

# OnesLike: a tensor of ones with the same shape and dtype as its input.
x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
oneslike = ops.OnesLike()
output = oneslike(x)
print(output)


import mindspore.ops as ops

# Constant tensors created from a shape plus a dtype.
shape = (2, 2)
ones = ops.Ones()
zeros = ops.Zeros()
output = ones(shape, mstype.float32)
print(output)
output = zeros(shape, mstype.float32)
print(output)



# Inspect a tensor's dtype and shape attributes.
t1 = Tensor(np.zeros([1, 2, 3]), mstype.float32)
print(f"Datatype of tensor: {t1.dtype}")
print(f"Shape of tensor: {t1.shape}")



# NumPy-style indexing: row, column slice, and Ellipsis.
tensor = Tensor(np.array([[0, 1], [2, 3]]).astype(np.float32))
print(f"First row: {tensor[0]}")
print(f"First column: {tensor[:, 0]}")
print(f"Last column: {tensor[..., -1]}")


# Concat joins tensors along an existing axis (axis 0 by default).
data1 = Tensor(np.array([[0, 1], [2, 3]]).astype(np.float32))
data2 = Tensor(np.array([[4, 5], [6, 7]]).astype(np.float32))
concat = ops.Concat()
print(concat((data1, data2)))


# Stack joins tensors along a brand-new leading axis.
data1 = Tensor(np.array([[0, 1], [2, 3]]).astype(np.float32))
data2 = Tensor(np.array([[4, 5], [6, 7]]).astype(np.float32))
stack = ops.Stack()
print(stack([data1, data2]))



# Element-wise multiplication of two float32 vectors.
input_x = Tensor(np.array([1.0, 2.0, 3.0]), mstype.float32)
input_y = Tensor(np.array([4.0, 5.0, 6.0]), mstype.float32)
mul = ops.Mul()
output = mul(input_x, input_y)
print(output)

# Tensor -> NumPy conversion with asnumpy().
zeros = ops.Zeros()
output = zeros((2, 2), mstype.float32)
print(f"output: {type(output)}")
n_output = output.asnumpy()
print(f"n_output: {type(n_output)}")

# NumPy -> Tensor conversion via the Tensor constructor.
output = np.array([1, 0, 1, 0])
print(f"output: {type(output)}")
t_output = Tensor(output)
print(f"t_output: {type(t_output)}")



import mindspore.dataset as ds

# Read the first five CIFAR-10 samples sequentially.
# NOTE(review): hard-coded Windows path — adjust per machine.
DATA_DIR = "E:/deeplearning/cifar-10-batches-bin"
sampler = ds.SequentialSampler(num_samples=5)
dataset = ds.Cifar10Dataset(DATA_DIR, sampler=sampler)

for item in dataset.create_dict_iterator():
    print("Image shape: {}".format(item['image'].shape), ", Label:{}".format(item['label']))

import numpy as np

# Seed NumPy's global RNG so DatasetGenerator below yields reproducible samples.
np.random.seed(58)


class DatasetGenerator:
    """A 5-sample random source usable with ds.GeneratorDataset.

    Each item is a (features, label) pair of float arrays drawn
    uniformly from [0, 1): features have shape (2,), labels shape (1,).
    """

    def __init__(self):
        # Draw features first, then labels, so the RNG consumption
        # order (and hence the values under a fixed seed) is unchanged.
        self.data = np.random.sample((5, 2))
        self.label = np.random.sample((5, 1))

    def __getitem__(self, index):
        """Return the (features, label) pair at `index`."""
        return self.data[index], self.label[index]

    def __len__(self):
        """Number of samples available."""
        return len(self.data)

# Wrap the Python generator object in a MindSpore dataset with named columns.
dataset_generator = DatasetGenerator()
dataset = ds.GeneratorDataset(dataset_generator, ["data", "label"], shuffle=False)
for row in dataset.create_dict_iterator():
    print('{}'.format(row["data"]), '{}'.format(row["label"]))



# Deterministic shuffle (fixed seed), then group samples into batches of 2.
ds.config.set_seed(58)
dataset = dataset.shuffle(buffer_size=10)
dataset = dataset.batch(batch_size=2)
for batch in dataset.create_dict_iterator():
    print("data: {}".format(batch["data"]))
    print("label: {}".format(batch["label"]))

import matplotlib.pyplot as plt
from mindspore.dataset.vision import Inter
import mindspore.dataset.vision.c_transforms as c_vision

# NOTE(review): hard-coded Windows path — adjust per machine.
DATA_DIR = "E:/deeplearning/MNIST_Data/train"
mnist_dataset = ds.MnistDataset(DATA_DIR, num_samples=6, shuffle=False)

# Show the first raw image with its label as the title.
mnist_it = mnist_dataset.create_dict_iterator()
data = next(mnist_it)
plt.figure(figsize=(3, 3))
plt.imshow(data['image'].asnumpy().squeeze(), cmap=plt.cm.gray)
plt.title(data['label'].asnumpy(), fontsize=20)
plt.show()

# Augmentation pipeline: bilinear resize to 200x200, then a random 150x150 crop.
resize_op = c_vision.Resize(size=(200, 200), interpolation=Inter.LINEAR)
crop_op = c_vision.RandomCrop(150)
mnist_dataset = mnist_dataset.map(operations=[resize_op, crop_op], input_columns=["image"])



# Display the first sample after the resize + crop pipeline.
aug_iter = mnist_dataset.create_dict_iterator()
sample = next(aug_iter)
plt.figure(figsize=(3, 3))
plt.imshow(sample['image'].asnumpy().squeeze(), cmap=plt.cm.gray)
plt.title(sample['label'].asnumpy(), fontsize=20)
plt.show()



import csv
import os
import time
import numpy as np
from easydict import EasyDict as edict
from matplotlib import pyplot as plt
import mindspore
from mindspore import nn
from mindspore import context
from mindspore import dataset
from mindspore.train.callback import TimeMonitor, LossMonitor
from mindspore import Tensor
from mindspore.train import Model
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig

# Hyper-parameters and paths for the iris classifier.
cfg = edict({
    'data_size': 150,
    'epoch_size': 20,            # number of training epochs
    'lr': 0.01,                  # learning rate
    'train_size': 120,           # training-set size
    'test_size': 30,             # test-set size
    'feature_number': 4,         # input feature count
    'num_class': 3,              # number of output classes
    'batch_size': 30,
    'data_dir': "E:/deep/Iris.data",
    'save_checkpoint_steps': 5,  # checkpoint every N steps
    'keep_checkpoint_max': 1,    # keep at most N checkpoints
    'out_dir_momentum': './model_iris/momentum',
})
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")


# Load the iris CSV into a list of string rows.
with open(cfg.data_dir) as csv_file:
    data = list(csv.reader(csv_file, delimiter=','))
# Peek at the first five rows (only displays in a REPL/notebook).
data[:5]


# All 150 rows: the 4 attribute columns become the features X, and the
# 3 species names are mapped to integer class ids {0, 1, 2} as labels Y.
label_map = {'Iris-setosa': 0,'Iris-versicolor': 1,'Iris-virginica':2 }
# Extract features X (float32) and labels Y (int32) from the raw CSV rows.
X = np.array([[float(x) for x in s[:-1]] for s in data[:cfg.data_size]], np.float32)
Y = np.array([label_map[s[-1]] for s in data[:cfg.data_size]], np.int32)
# Random split: 120 training rows, the remaining 30 for testing.
# (Depends on the np.random.seed(58) call earlier for reproducibility.)
train_idx = np.random.choice(cfg.data_size, cfg.train_size, replace=False)
test_idx = np.array(list(set(range(cfg.data_size)) - set(train_idx)))
X_train, Y_train = X[train_idx], Y[train_idx]
X_test, Y_test = X[test_idx], Y[test_idx]

 #使用 MindSpore GeneratorDataset 接口将 numpy.ndarray 类型的数据转换为 Dataset
def gen_data(X_train, Y_train, epoch_size):
 # 生成训练集
 XY_train = list(zip(X_train, Y_train))
 ds_train = dataset.GeneratorDataset(XY_train, ['x', 'y'])
 # 设定数据集大小
 # 打乱操作并设定 batchsize
 ds_train = ds_train.shuffle(buffer_size=cfg.train_size).batch(cfg.batch_size,
drop_remainder=True)
 # 生成测试集
 XY_test = list(zip(X_test, Y_test))
 ds_test = dataset.GeneratorDataset(XY_test, ['x', 'y'])
 # 设定数据集大小
 # 打乱操作并设定 batchsize
 ds_test = ds_test.shuffle(buffer_size=cfg.test_size).batch(cfg.test_size,
drop_remainder=True)
 return ds_train, ds_test

ds_train, ds_test = gen_data(X_train, Y_train, cfg.epoch_size) # 生成训练集和测试集

# 定义网络
# 定义一个全连接网络层，输入特征为 4，输出类别为 3
# Model: a single fully-connected layer mapping 4 features to 3 class logits.
network = nn.Dense(cfg.feature_number, cfg.num_class)
# Loss: softmax cross-entropy over integer (sparse) labels, mean-reduced.
net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
# Optimizer: Momentum with lr = cfg.lr and momentum coefficient 0.9.
net_opt = nn.Momentum(network.trainable_params(), cfg.lr, 0.9)

# Training helper.
def train(network, net_opt, ds_train, net_loss, prefix, directory, print_times):
    """Compile the model, train it, and save checkpoints.

    Parameters
    ----------
    network : the model to train.
    net_opt : the optimizer.
    ds_train : the training dataset.
    net_loss : the loss function.
    prefix : checkpoint file-name prefix.
    directory : checkpoint output directory.
    print_times : int — print the loss every `print_times` steps.

    Returns
    -------
    The trained Model.
    """
    # Compile the model with an accuracy metric.
    model = Model(network, loss_fn=net_loss, optimizer=net_opt, metrics={"acc"})
    # FIX: honour the print_times argument instead of silently ignoring it.
    # (The only caller passes 4 == train_size / batch_size, the value the
    # original hard-coded, so observed behavior is unchanged.)
    loss_cb = LossMonitor(per_print_times=print_times)
    # Checkpoint policy from cfg: save every N steps, keep at most M files.
    config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                                 keep_checkpoint_max=cfg.keep_checkpoint_max)
    ckpoint_cb = ModelCheckpoint(prefix=prefix, directory=directory, config=config_ck)
    print("============== Starting Training ==============")
    # Train without dataset sinking so the loss callback fires per step.
    model.train(cfg.epoch_size, ds_train, callbacks=[ckpoint_cb, loss_cb],
                dataset_sink_mode=False)
    return model

# Train the network: optimizer, dataset, loss, checkpoint prefix/dir, print interval.
model = train(network, net_opt, ds_train, net_loss, "checkpoint_momentum",
              cfg.out_dir_momentum, 4)

class_names = ['setosa', 'versicolor', 'virginica']

# Evaluate the model on the test set and print per-sample predictions.
def eval_predict(model, ds_test):
    # Overall accuracy over the whole test set.
    metric = model.eval(ds_test)
    print(metric)
    # Pull one (full-size) batch and run it through the network.
    batch = next(ds_test.create_dict_iterator())
    features = Tensor(batch['x'], mindspore.float32)
    logits = model.predict(features).asnumpy()
    true_label = batch['y'].asnumpy()
    # Compare the first 10 predicted classes against the ground truth.
    for i in range(10):
        pre_label = np.argmax(logits[i, :])
        print('第' + str(i) + '个 sample 预测结果：', class_names[pre_label], ' 真实结果：',
              class_names[true_label[i]])

# Run evaluation and prediction on the test split.
eval_predict(model, ds_test)

os.listdir('./model_iris/momentum') # list the saved checkpoint files
