import torch
import matplotlib.pyplot as plt
import random

# Dataset: two classes of 200-dim features, each drawn from a Gaussian.
# Class 0 has mean -1, class 1 has mean +1; both have unit variance.
n_train, n_test, n_feat = 7000, 3000, 200

neg_train = torch.normal(-1.0, 1.0, size=(n_train, n_feat))  # class 0, train
neg_test = torch.normal(-1.0, 1.0, size=(n_test, n_feat))    # class 0, test
pos_train = torch.normal(1.0, 1.0, size=(n_train, n_feat))   # class 1, train
pos_test = torch.normal(1.0, 1.0, size=(n_test, n_feat))     # class 1, test

# Stack the two classes; labels are 0 for the negative-mean class, 1 otherwise.
train_features = torch.cat((neg_train, pos_train), 0).type(torch.FloatTensor)
train_labels = torch.cat((torch.zeros(n_train), torch.ones(n_train)), 0)
test_features = torch.cat((neg_test, pos_test), 0).type(torch.FloatTensor)
test_labels = torch.cat((torch.zeros(n_test), torch.ones(n_test)), 0)


def data_iter(batch_size, features, labels, shuffle):
    """Yield (features, labels) minibatches of at most `batch_size` rows.

    When `shuffle` is truthy the row order is randomized; otherwise batches
    are produced in the original order. The final batch may be smaller.
    """
    order = list(range(len(features)))
    if shuffle:
        random.shuffle(order)
    for start in range(0, len(order), batch_size):
        # Slicing already clamps at the end of the list, so no min() needed.
        idx = torch.LongTensor(order[start:start + batch_size])
        yield features.index_select(0, idx), labels.index_select(0, idx)

def relu(x):
    """Elementwise ReLU: negative entries become 0, others pass through."""
    return torch.clamp(x, min=0.0)

def sigmoid(x):
    """Elementwise logistic sigmoid: 1 / (1 + e^-x)."""
    neg_exp = torch.exp(-x)
    return 1.0 / (1 + neg_exp)

def crossloss(output, label):
    output=output.squeeze(-1)
    return -(label*torch.log(output)+(1-label)*torch.log(1-output))

def sgd(params, lr, batch_size, batch_len=None):
    """Vanilla minibatch SGD step, then reset gradients to zero.

    Each param is updated in place: p -= lr * p.grad / n, where n is the
    number of examples the summed loss covered. Pass `batch_len` (the
    actual size of the current minibatch) so the final, possibly smaller,
    batch is scaled correctly; it defaults to `batch_size` to preserve
    the original call signature.
    """
    n = batch_size if batch_len is None else batch_len
    for param in params:
        # alpha folds the scalar step size into a single in-place update
        param.data.add_(param.grad, alpha=-lr / n)
        param.grad.zero_()

# Network dimensions: 200 input features -> 100 hidden units -> 1 output.
num_inputs, num_hidden, num_outputs = 200, 100, 1

# Weights start as small Gaussian noise; biases start at zero.
# All four tensors are leaves that accumulate gradients.
w1 = torch.normal(0, 0.01, size=(num_inputs, num_hidden), requires_grad=True)
b1 = torch.zeros(num_hidden).requires_grad_()
w2 = torch.normal(0, 0.01, size=(num_hidden, num_outputs), requires_grad=True)
b2 = torch.zeros(num_outputs).requires_grad_()

# Training hyperparameters.
batch_size = 100   # examples consumed per minibatch
num_epochs = 10    # full passes over the training set
lr = 0.005         # learning rate

def train(batch_size, num_epochs, lr, train_features, train_labels, test_features, test_labels):
    """Train the 2-layer MLP and report per-epoch loss/accuracy.

    Uses the module-level parameters w1, b1, w2, b2 and the helpers
    data_iter / relu / sigmoid / crossloss / sgd. After training, plots
    train vs. test loss curves with matplotlib.

    Args:
        batch_size: minibatch size for both training and evaluation.
        num_epochs: number of full passes over the training data.
        lr: SGD learning rate.
        train_features/train_labels: training set (labels are 0/1 floats).
        test_features/test_labels: held-out set, evaluated each epoch.
    """
    train_ls, test_ls, x_epoch = [], [], []

    for epoch in range(num_epochs):
        train_loss_sum, train_num, train_acc = 0, 0, 0
        test_loss_sum, test_num, test_acc = 0, 0, 0

        # ---- Training pass (shuffled minibatches) ----
        for x, y in data_iter(batch_size, train_features, train_labels, True):
            hidden = relu(torch.mm(x, w1) + b1)
            output = sigmoid(torch.mm(hidden, w2) + b2)
            loss = crossloss(output, y).sum()
            train_loss_sum += loss.item()
            # Drop the trailing singleton dim so predictions align with 1-D labels.
            preds = output.squeeze(-1)
            train_acc += (preds.gt(0.5) == y).sum().item()
            train_num += x.shape[0]
            loss.backward()
            sgd([w1, b1, w2, b2], lr, batch_size)
        train_ls.append(train_loss_sum / train_num)
        x_epoch.append(epoch + 1)

        # ---- Evaluation pass: no parameter updates, so skip building
        # the autograd graph entirely (saves memory and time). ----
        with torch.no_grad():
            for x, y in data_iter(batch_size, test_features, test_labels, False):
                hidden = relu(torch.mm(x, w1) + b1)
                output = sigmoid(torch.mm(hidden, w2) + b2)
                loss = crossloss(output, y).sum()
                test_loss_sum += loss.item()
                preds = output.squeeze(-1)
                test_acc += (preds.gt(0.5) == y).sum().item()
                test_num += x.shape[0]
        test_ls.append(test_loss_sum / test_num)
        print("epoch %d,train loss %.5f,train acc %.5f,test loss %.5f,test acc %.5f"%(epoch + 1, train_loss_sum/train_num, train_acc/train_num, test_loss_sum/test_num, test_acc/test_num))

    # Loss curves over epochs for train vs. test.
    plt.plot(x_epoch, train_ls, label="train_loss", linewidth=2)
    plt.plot(x_epoch, test_ls, label="test_loss", linewidth=1.5)
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.legend()
    plt.show()

train(batch_size, num_epochs, lr, train_features, train_labels, test_features, test_labels)