import torch
import torch.nn as nn
from torch.nn import init
import torch.utils.data
import matplotlib.pyplot as plt
import random

# Dataset: two 200-dimensional Gaussian clusters (class 0 around mean -1,
# class 1 around mean +1), 7000 train / 3000 test examples per class.
# NOTE(review): no RNG seed is set, so the draws differ between runs.
train_data = torch.ones(7000, 200)  # base tensor giving the data's shape
test_data = torch.ones(3000, 200)  # base tensor giving the data's shape
train1 = torch.normal(-1 * train_data, 1)  # class 0 train: N(-1, 1) per element
test1 = torch.normal(-1 * test_data, 1)  # class 0 test
train2 = torch.normal(1 * train_data, 1)  # class 1 train: N(+1, 1) per element
test2 = torch.normal(1 * test_data, 1)  # class 1 test
label_train1 = torch.zeros(7000)  # class 0 -> label 0.0
label_train2 = torch.ones(7000)  # class 1 -> label 1.0
label_test1 = torch.zeros(3000)
label_test2 = torch.ones(3000)

# Concatenate both classes along dim 0: features (14000, 200) / (6000, 200),
# labels (14000,) / (6000,). Labels stay float, as required by nn.BCELoss.
train_features = torch.cat((train1, train2), 0).type(torch.FloatTensor)
train_labels = torch.cat((label_train1, label_train2), 0)
test_features = torch.cat((test1, test2), 0).type(torch.FloatTensor)
test_labels = torch.cat((label_test1, label_test2), 0)


def data_iter(batch_size, features, labels, shuffle):
    """Yield minibatches of (features, labels), indexed along dim 0.

    Args:
        batch_size: samples per batch; the final batch may be smaller.
        features: tensor with one example per row (len() gives the count).
        labels: tensor aligned with `features` along dim 0.
        shuffle: when truthy, visit the examples in random order.

    Yields:
        (feature_batch, label_batch) tensor pairs.
    """
    num_examples = len(features)
    indices = list(range(num_examples))
    if shuffle:  # idiomatic truthiness test instead of `== True`
        random.shuffle(indices)
    for start in range(0, num_examples, batch_size):
        # Slicing clamps at the end of the list, so the old
        # min(start + batch_size, num_examples) guard is unnecessary.
        batch_idx = torch.LongTensor(indices[start:start + batch_size])
        yield features.index_select(0, batch_idx), labels.index_select(0, batch_idx)

#两层神经网络，隐藏层有100个神经元
class Logistic(nn.Module):
    """Two-layer MLP for binary classification.

    Maps a 200-dim input through a 100-unit ReLU hidden layer to a single
    sigmoid output in (0, 1), suitable for nn.BCELoss.
    """

    def __init__(self):
        super(Logistic, self).__init__()
        # Attribute names are kept so state-dict keys stay compatible.
        self.linear1 = nn.Linear(200, 100)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(100, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # affine -> ReLU -> affine -> sigmoid, expressed as nested calls.
        hidden = self.relu(self.linear1(x))
        return self.sigmoid(self.linear2(hidden))

# Minibatch size for each iterator step
batch_size = 220
# Number of passes over the training set
num_epochs = 15
# SGD learning rate
lr = 0.00005

net = Logistic()
loss = nn.BCELoss()  # binary cross-entropy; expects sigmoid outputs in (0, 1)
optimizer = torch.optim.SGD(net.parameters(), lr = lr)



def train(net, num_epochs, batch_size, train_features, train_labels, test_features, test_labels, loss):
    """Train `net`, print per-epoch metrics, and plot train/test loss curves.

    Relies on the module-level `optimizer` and `data_iter`.
    NOTE(review): consider passing the optimizer in explicitly.

    Args:
        net: model mapping (batch, 200) inputs to (batch, 1) probabilities.
        num_epochs: number of epochs to run.
        batch_size: minibatch size for both train and test iteration.
        train_features, train_labels: training tensors, aligned on dim 0.
        test_features, test_labels: held-out tensors, aligned on dim 0.
        loss: scalar-reducing loss (here nn.BCELoss with the default 'mean').
    """
    ls_train, ls_test, x_epochs = [], [], []

    for epoch in range(num_epochs):
        train_loss_sum, train_num, train_acc = 0.0, 0.0, 0.0
        test_loss_sum, test_num, test_acc = 0.0, 0.0, 0.0

        # Training pass (shuffled).
        for X, y in data_iter(batch_size, train_features, train_labels, True):
            y_hat = net(X).squeeze(-1)
            # BCELoss already reduces to a scalar; the old `.sum()` was a no-op.
            l = loss(y_hat, y)
            # BUG FIX: the original wrote `optimizer.zero_grad` without
            # parentheses, so the gradients were never cleared and kept
            # accumulating across every batch of the whole run.
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_loss_sum += l.item() * X.shape[0]  # mean loss * batch = summed loss
            train_num += X.shape[0]
            train_acc += (y_hat.gt(0.5) == y).sum().item()  # threshold at 0.5
        ls_train.append(train_loss_sum / train_num)

        # Evaluation pass: no gradients needed; shuffling does not change
        # the aggregated metrics, so iterate in order.
        with torch.no_grad():
            for X, y in data_iter(batch_size, test_features, test_labels, False):
                y_hat = net(X).squeeze(-1)
                l = loss(y_hat, y)
                test_loss_sum += l.item() * X.shape[0]
                test_num += X.shape[0]
                test_acc += (y_hat.gt(0.5) == y).sum().item()
        ls_test.append(test_loss_sum / test_num)
        x_epochs.append(epoch + 1)
        print("epoch %d,train loss %f,train acc %f,test loss %f,test acc %f" % (epoch + 1, train_loss_sum / train_num, train_acc / train_num, test_loss_sum / test_num, test_acc / test_num))

    # Loss curves over epochs.
    plt.plot(x_epochs, ls_train, label="train_loss", linewidth=2)
    plt.plot(x_epochs, ls_test, label="test_loss", linewidth=1.5)
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.legend()
    plt.show()

# Run the full training loop and show the loss plot.
train(net, num_epochs, batch_size, train_features, train_labels, test_features, test_labels, loss)




