# coding=utf-8
# Simple linear function fitting (linear regression demo)
import numpy as np
from mindspore import context
import matplotlib
import matplotlib.pyplot as plt
import mindspore.dataset as ds

# Use the TkAgg backend so figures open in interactive windows.
matplotlib.use('TkAgg')
# Run MindSpore in graph (compiled) mode on the CPU backend.
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")


def get_data(num, w=2.0, b=3.0):
    """Yield `num` noisy samples of the line y = w * x + b.

    Each sample is a pair of shape-(1,) float32 arrays (feature, label):
    x is drawn uniformly from [-10, 10] and the label gets unit Gaussian noise.
    """
    for _ in range(num):
        sample_x = np.random.uniform(-10.0, 10.0)
        jitter = np.random.normal(0, 1)
        sample_y = sample_x * w + b + jitter
        features = np.array([sample_x], dtype=np.float32)
        label = np.array([sample_y], dtype=np.float32)
        yield features, label


eval_data = list(get_data(50))
# Dense x grid for the target line y = 2x + 3.
# Fix: use np.arange, not np.array — np.array([-10, 10, 0.1]) built a
# 3-element array instead of the intended range from -10 to 10 in 0.1 steps
# (it only looked right because the three points happen to be collinear).
x_target_label = np.arange(-10, 10, 0.1)
y_target_label = x_target_label * 2 + 3
x_eval_label, y_eval_label = zip(*eval_data)

# Scatter the noisy evaluation samples (red) against the target line (green).
plt.scatter(x_eval_label, y_eval_label, color="red", s=5)
plt.plot(x_target_label, y_target_label, color="green")
plt.title("Eval data")
plt.show()


# Define the dataset-creation function (generate samples, then batch and repeat)
def create_dataset(num_data, batch_size=16, repeat_size=1):
    """Build a MindSpore dataset of `num_data` generated samples.

    The samples are materialized into a list (so the dataset can be
    re-iterated), then batched and repeated.
    """
    samples = list(get_data(num_data))
    dataset = ds.GeneratorDataset(samples, column_names=['data', 'label'])
    return dataset.batch(batch_size).repeat(repeat_size)


data_number = 1600  # total number of generated training samples
batch_number = 16   # samples per batch
repeat_number = 1   # how many times the dataset is repeated

ds_train = create_dataset(data_number, batch_size=batch_number, repeat_size=repeat_number)
print("The dataset size of ds_train:", ds_train.get_dataset_size())
# Peek at one batch to inspect the column names and tensor shapes.
dict_datasets = next(ds_train.create_dict_iterator())

print(dict_datasets.keys())
print("The x label value shape:", dict_datasets["data"].shape)
print("The y label value shape:", dict_datasets["label"].shape)

from mindspore.common.initializer import Normal
from mindspore import nn


class LinearNet(nn.Cell):
    """Single-neuron linear model: y = w * x + b."""

    def __init__(self):
        super(LinearNet, self).__init__()
        # One input feature -> one output; weight and bias are both
        # initialized from N(0, 0.02).
        self.fc = nn.Dense(1, 1, Normal(0.02), Normal(0.02))

    def construct(self, x):
        # Forward pass: apply the affine transform.
        return self.fc(x)


net = LinearNet()
model_params = net.trainable_params()
# Print the randomly initialized weight and bias before training.
for param in model_params:
    print("初始参数：", param, param.asnumpy())

# The randomly initialized model (blue) still differs markedly from the
# target function (green), as the plot below shows.
from mindspore import Tensor

# Fix: use np.arange, not np.array — np.array([-10, 10, 0.1]) would create
# just three points instead of the intended dense grid from -10 to 10.
x_model_label = np.arange(-10, 10, 0.1)
# Evaluate the initial model y = w*x + b from its current parameters.
y_model_label = (x_model_label * Tensor(model_params[0]).asnumpy()[0][0] +
                 Tensor(model_params[1]).asnumpy()[0])

plt.axis([-10, 10, -20, 25])
plt.scatter(x_eval_label, y_eval_label, color="red", s=5)
plt.plot(x_model_label, y_model_label, color="blue")
plt.plot(x_target_label, y_target_label, color="green")
plt.show()

# Define the forward network and the backward (optimization) pass, then
# associate them.

# The forward pass has two parts: 1) feed inputs through the model network to
# get predictions; 2) compute the loss from the predictions and the labels.
net = LinearNet()
net_loss = nn.loss.MSELoss()
# Backward pass:
# after gradients are computed, the optimizer updates the weights and feeds
# them back into the model — MindSpore's Momentum optimizer implements this.
opt = nn.Momentum(net.trainable_params(), learning_rate=0.005, momentum=0.9)
# Tie the forward network, loss function and optimizer together.
from mindspore import Model

model = Model(net, net_loss, opt)

# Prepare to visualize the fitting process

# Define the plotting function
import matplotlib.pyplot as plt
import time


def plot_model_and_datasets(net, eval_data):
    """Plot the network's current fit (blue) against the target line (green)
    and the evaluation samples (red), then pause briefly."""
    params = net.trainable_params()
    w = Tensor(params[0]).asnumpy()[0][0]
    b = Tensor(params[1]).asnumpy()[0]
    grid = np.arange(-10, 10, 0.1)
    fitted = grid * w + b
    eval_x, eval_y = zip(*eval_data)

    plt.axis([-11, 11, -20, 25])
    plt.scatter(eval_x, eval_y, color="red", s=5)
    plt.plot(grid, fitted, color="blue")
    # Target function used to generate the data: y = 2x + 3.
    plt.plot(grid, grid * 2 + 3, color="green")
    plt.show()
    time.sleep(0.2)


# Define the training callback
from IPython import display
from mindspore.train.callback import Callback


class ImageShowCallback(Callback):
    """Training callback that redraws the fitted line after every step.

    Args:
        net: the network whose current parameters are plotted.
        eval_data: list of (x, y) sample pairs shown as a scatter plot.
    """

    def __init__(self, net, eval_data):
        # Fix: the original omitted the base-class initialization, which
        # subclasses of a framework Callback should always perform.
        super(ImageShowCallback, self).__init__()
        self.net = net
        self.eval_data = eval_data

    def step_end(self, run_context):
        # Redraw the current fit, then clear the previous output so the
        # plot animates in place (e.g. in a notebook).
        plot_model_and_datasets(self.net, self.eval_data)
        display.clear_output(wait=True)


# Run training.
epoch = 1
imageshow_cb = ImageShowCallback(net, eval_data)
# NOTE(review): the visualization callback is constructed but the
# callback-enabled train call below is commented out, so imageshow_cb is
# currently unused — confirm whether per-step plotting was disabled on purpose.
# model.train(epoch, ds_train, callbacks=[imageshow_cb], dataset_sink_mode=False)
model.train(epoch, ds_train, dataset_sink_mode=False)
# After training, print the final parameters: weight should be close to 2.0
# and bias close to 3.0, matching the data-generating function.
plot_model_and_datasets(net, eval_data)
for net_param in net.trainable_params():
    print("训练后：", net_param, net_param.asnumpy())
