# 1. Manually implement a feed-forward neural network to solve the binary classification task above
import random
import numpy as np
from common import *


def generate_data(size_, ρ_):
    """Generate two Gaussian clusters of ``size_`` samples in ``ρ_`` dimensions.

    Class 0 (label 0.0) is drawn from N(0.2, 0.05); class 1 (label 1.0)
    from N(-0.2, 0.05).

    Returns (class0_data, class0_labels, class1_data, class1_labels).
    """
    shape = (size_, ρ_)
    x_data = torch.from_numpy(np.random.normal(0.2, 0.05, shape)).float()
    x_label = torch.zeros(size_)
    y_data = torch.from_numpy(np.random.normal(-0.2, 0.05, shape)).float()
    y_label = torch.ones(size_)
    return x_data, x_label, y_data, y_label


class Net(torch.nn.Module):
    """Two-layer feed-forward network (hidden ReLU layer + linear output)
    for the binary classification task."""

    def __init__(self, n_input, n_hidden, n_output):
        """Build the layers: n_input -> n_hidden -> n_output."""
        super(Net, self).__init__()
        # Layer definitions
        self.hidden = torch.nn.Linear(n_input, n_hidden)
        self.output = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        # Hidden layer with ReLU activation
        hidden = torch.nn.functional.relu(self.hidden(x))
        # Linear output layer (raw scores, no activation)
        y_predict = self.output(hidden)
        return y_predict

    def train(self, batch_size, feathers, labels, test_data, test_labels, lr, train_num):
        """Full-batch SGD training loop.

        NOTE(review): this overrides ``torch.nn.Module.train(mode)``; the
        name is kept for compatibility with existing callers.
        ``batch_size`` is accepted for interface compatibility but unused
        (each epoch uses the whole training set).

        Returns (train_losses, test_losses): one averaged MSE value per epoch.
        """
        # Optimizer
        optimizer = torch.optim.SGD(self.parameters(), lr)
        # Mean-squared-error loss (averages over the batch by default)
        loss_func = torch.nn.MSELoss()
        # Per-epoch loss records
        train_l = []
        test_l = []
        for i in range(train_num):
            # Forward pass; squeeze (N, 1) -> (N,) to match the label shape.
            # BUG FIX: without the squeeze, MSELoss broadcasts (N, 1) vs (N,)
            # to an (N, N) difference and computes the wrong loss.
            y_predict = self(feathers).squeeze(-1)
            loss = loss_func(y_predict, labels)
            # Clear gradients accumulated by the previous step
            optimizer.zero_grad()
            # Backpropagate
            loss.backward()
            # Update parameters
            optimizer.step()

            # MSELoss already averages over the batch, so record it directly
            # (the old code divided the averaged loss by N a second time).
            train_loss = loss.item()
            train_l.append(train_loss)
            # Classify with a 0.5 threshold (>= 0.5 -> class 1) and report
            # accuracy as a fraction, matching the "train acc" label below.
            mask = y_predict.ge(0.5).float()
            acc = (mask == labels).float().mean().item()

            # Evaluate on the test set without tracking gradients
            with torch.no_grad():
                y_predict_test = self(test_data).squeeze(-1)
                test_loss = loss_func(y_predict_test, test_labels).item()
            test_l.append(test_loss)
            print('epoch %d, loss %.4f, train acc %.3f, test loss %.3f'
                  % (i + 1, train_loss, acc, test_loss))
        return train_l, test_l


def loss_(y_hat, y):
    """Manual binary cross-entropy, averaged over all elements.

    ``y_hat`` are predicted probabilities in (0, 1); ``y`` are 0/1 targets.
    """
    term_pos = y * torch.log(y_hat)
    term_neg = (1 - y) * torch.log(1 - y_hat)
    return -(term_pos + term_neg).mean()


# Generate 10000 samples of 200-dimensional data and split them
# 7000 for training / 3000 for testing.
size, ρ = 10000, 200
x_data, x_label, y_data, y_label = generate_data(size, ρ)
x_train, y_train, x_test, y_test = train_test_split(x_data, y_data, 0.7)
x_label_train, y_label_train, x_label_test, y_label_test = train_test_split(x_label, y_label, 0.7)
# Stack the two classes into single feature/label tensors.
# NOTE(review): despite the original "shuffle" comment, no shuffling is done here.
feathers = torch.cat((x_train, y_train)).float()
labels = torch.cat((x_label_train, y_label_train)).float()

test_data = torch.cat((x_test, y_test)).float()
test_labels = torch.cat((x_label_test, y_label_test)).float()

# Optional scatter plot of two feature dimensions, colored by label:
# plt.scatter(feathers[:, 1], feathers[:, 2], c=labels.numpy())
# plt.show()
# Network dimensions: 200 inputs, 2 hidden units, 1 output.
num_inputs, num_hidden, num_outs = 200, 2, 1

# Training hyper-parameters: learning rate 0.1, 20 epochs.
batch_size = 100
num_workers = 0  # number of data-loading worker threads (unused here)
num_epochs, lr = 20, 0.1

if __name__ == '__main__':
    # Build the network and run the full training loop.
    model = Net(num_inputs, num_hidden, num_outs)
    train_curve, test_curve = model.train(
        batch_size, feathers, labels, test_data, test_labels, lr, num_epochs)
    # Plot the train/test loss curves against the epoch index.
    epochs_axis = range(1, num_epochs + 1)
    loss_draw(epochs_axis, train_curve, "epochs", "loss", epochs_axis, test_curve,
              ['train', 'test'])
