#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : 3.PyTorch全连接层原理和使用.py
# @Author: Richard Chiming Xu
# @Date  : 2021/11/7
# @Desc  :

import numpy as np
import torch
from torch import nn
from torch.autograd import Variable

import torch.utils.data as Data

# Build the synthetic training data
def gen_data():
    """Generate a synthetic linear dataset y = 10 * x + 4.

    Returns:
        (x, y): two float32 numpy arrays of shape (1000, 1); every y[i]
        equals exactly 10 * x[i] + 4 because the single random offset is
        applied to x before y is computed.
    """
    def f(x):
        return 10 * x + 4

    # One uniform random shift applied to the whole x range.
    # Cast AFTER the addition: under NumPy 2.x (NEP 50) adding a float64
    # scalar to a float32 array promotes the result back to float64,
    # which would later hand torch.from_numpy a float64 tensor.
    offset = np.random.uniform(0, 1)
    x = (np.linspace(0, 10, 1000) + offset).astype(np.float32)
    y = f(x).astype(np.float32)

    x = x.reshape(1000, 1)
    y = y.reshape(1000, 1)
    return x, y

# Convert to tensors and initialize parameters
def trans_tensor(x, y):
    """Convert numpy training data to tensors and initialize parameters.

    Args:
        x: numpy array of training features.
        y: numpy array of training targets.

    Returns:
        (x_train, y_train, w, b): the data as torch tensors, plus a
        random weight and a zero bias, both created with
        requires_grad=True so autograd tracks them.
    """
    # Convert to tensors (shares memory with the numpy arrays).
    x_train = torch.from_numpy(x)
    y_train = torch.from_numpy(y)

    # torch.autograd.Variable is deprecated since PyTorch 0.4: plain
    # tensors carry requires_grad directly and wrapping is a no-op.
    w = torch.randn(1, requires_grad=True)
    b = torch.zeros(1, requires_grad=True)

    return x_train, y_train, w, b

def linear_model(X, w, b):
    """Return the elementwise linear prediction w * X + b (broadcast)."""
    prediction = w * X
    return prediction + b

# Define the network model (explicit matrix form)
class MatrixLinearNet(nn.Module):
    """Linear regression with explicit weight/bias tensors and a
    hand-rolled SGD step (illustrates what nn.Linear + optim.SGD do)."""

    def __init__(self, learning_rate):
        """Args:
            learning_rate: step size used by update_params().
        """
        super(MatrixLinearNet, self).__init__()
        # Plain requires_grad tensors replace the deprecated
        # torch.autograd.Variable wrapper (a no-op since PyTorch 0.4).
        self.w = torch.randn(1, requires_grad=True)
        self.b = torch.zeros(1, requires_grad=True)
        self.learning_rate = learning_rate

    # forward defines the forward pass
    def forward(self, X):
        """Elementwise linear prediction y = X * w + b."""
        y = X * self.w + self.b
        return y

    def update_params(self):
        """Apply one manual SGD step, then reset gradients to zero."""
        # In-place updates run under no_grad so the step itself is not
        # recorded by autograd (modern equivalent of the old .data idiom).
        with torch.no_grad():
            self.w -= self.learning_rate * self.w.grad
            self.b -= self.learning_rate * self.b.grad

        self.w.grad.zero_()
        self.b.grad.zero_()

# Define the network model (nn.Linear form)
class NNLinearNet(nn.Module):
    """Linear regression built on torch's own fully-connected layer."""

    def __init__(self, n_feature):
        super(NNLinearNet, self).__init__()
        # Single fully-connected layer: n_feature inputs -> 1 output.
        self.linear = nn.Linear(n_feature, 1)

    # forward defines the forward pass
    def forward(self, X):
        """Delegate the prediction straight to the nn.Linear layer."""
        return self.linear(X)


def train(model_type):
    """Train a linear regressor on the synthetic dataset.

    Args:
        model_type: 0 trains the hand-rolled matrix model with a manual
            SGD step; any other value trains the nn.Linear model driven
            by torch.optim.SGD.
    """
    features, targets = gen_data()
    x_train, y_train, w, b = trans_tensor(features, targets)

    # Combine the training features and labels, then draw shuffled
    # mini-batches of 10 samples each.
    dataset = Data.TensorDataset(x_train, y_train)
    data_iter = Data.DataLoader(dataset, 10, shuffle=True)

    # Training schedule.
    epochs = 10
    learning_rate = 0.01
    criterion = nn.MSELoss()

    if model_type == 0:  # matrix implementation of the network
        # Manual forward pass + hand-rolled parameter update.
        model = MatrixLinearNet(learning_rate)
        for epoch in range(epochs):
            for X, y in data_iter:
                l = criterion(model(X), y.view(-1, 1))
                l.backward()
                model.update_params()
            print('epoch %d, loss: %f' % (epoch, l.item()))
    else:  # nn.Linear implementation of the network
        model = NNLinearNet(len(features[0]))
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
        for epoch in range(epochs):
            for X, y in data_iter:
                l = criterion(model(X), y.view(-1, 1))
                optimizer.zero_grad()  # same as net.zero_grad()
                l.backward()
                optimizer.step()
            print('epoch %d, loss: %f' % (epoch, l.item()))

# Map human-readable model names to the integer codes train() expects.
type_dict = {
    'matrix': 0,
    'nn': 1
}

if __name__ == '__main__':
    # Guard the entry point so importing this module does not
    # immediately start a training run.
    train(type_dict['nn'])