import torch
import matplotlib.pyplot as plt
import random

# Dataset: two 200-dimensional Gaussian clusters for binary classification.
# Class 0 ~ N(-1, 1), class 1 ~ N(+1, 1); 7000 train / 3000 test rows each.
train1 = torch.normal(-1, 1, size=(7000, 200))
test1 = torch.normal(-1, 1, size=(3000, 200))
train2 = torch.normal(1, 1, size=(7000, 200))
test2 = torch.normal(1, 1, size=(3000, 200))
# Labels: 0 for the N(-1,1) cluster, 1 for the N(+1,1) cluster.
label_train1 = torch.zeros(7000)
label_train2 = torch.ones(7000)
label_test1 = torch.zeros(3000)
label_test2 = torch.ones(3000)

# Stack both classes into single feature/label tensors (class 0 rows first).
train_features = torch.cat((train1, train2), 0).type(torch.FloatTensor)
train_labels = torch.cat((label_train1, label_train2), 0)
test_features = torch.cat((test1, test2), 0).type(torch.FloatTensor)
test_labels = torch.cat((label_test1, label_test2), 0)


def data_iter(batch_size, features, labels, shuffle):
    """Yield (features, labels) mini-batches of up to ``batch_size`` rows.

    When ``shuffle`` is truthy the row order is randomized first; the last
    batch may be smaller than ``batch_size``.
    """
    total = len(features)
    order = list(range(total))
    if shuffle:
        random.shuffle(order)
    start = 0
    while start < total:
        # LongTensor of row indices for this batch (handles the short tail).
        batch_idx = torch.LongTensor(order[start:start + batch_size])
        yield features.index_select(0, batch_idx), labels.index_select(0, batch_idx)
        start += batch_size

def relu(x):
    """Elementwise rectifier: max(x, 0)."""
    zeros = torch.zeros_like(x)
    return torch.max(x, zeros)

def sigmoid(x):
    """Logistic function: squashes x elementwise into (0, 1)."""
    denom = 1 + torch.exp(-x)
    return 1.0 / denom

def crossloss(output, label):
    """Elementwise binary cross-entropy (not reduced).

    Args:
        output: predicted probabilities, shape (batch, 1) or (batch,).
        label: 0/1 float targets, shape (batch,).

    Returns:
        Per-example loss tensor, shape (batch,).
    """
    output = output.squeeze(-1)
    # Clamp away from exactly 0 and 1 so log() cannot produce -inf/NaN
    # when the sigmoid output saturates.
    eps = 1e-7
    output = output.clamp(eps, 1.0 - eps)
    return -(label * torch.log(output) + (1 - label) * torch.log(1 - output))

def sgd(params, lr, batch_size):
    """One mini-batch SGD step: p -= lr * p.grad / batch_size, then zero grads.

    The loss is summed (not averaged) over the batch upstream, so the
    division by batch_size here turns the summed gradient into a mean.
    """
    # Updates must not be recorded by autograd; torch.no_grad() is the
    # supported idiom (mutating .data bypasses autograd bookkeeping).
    with torch.no_grad():
        for param in params:
            if param.grad is None:
                # Parameter never touched by backward(); nothing to apply.
                continue
            param -= lr * param.grad / batch_size
            param.grad.zero_()

# Network dimensions: 200 input features (matches the dataset columns),
# one hidden layer of 100 units, a single sigmoid output for binary labels.
num_inputs = 200
num_hidden = 100
num_outputs = 1





def train(batch_size, num_epochs, lr, train_features, train_labels, test_features, test_labels):
    """Train a 1-hidden-layer MLP from scratch and evaluate each epoch.

    Args:
        batch_size: mini-batch size for both training and evaluation.
        num_epochs: number of passes over the training data.
        lr: learning rate passed to sgd().
        train_features/train_labels: training split (labels are 0/1 floats).
        test_features/test_labels: evaluation split.

    Returns:
        (train_losses, test_losses, train_accs, test_accs) — one entry per
        epoch; losses are mean per-example BCE, accuracies are fractions.
    """
    train_ls, test_ls, x_epoch, list_train_acc, list_test_acc = [], [], [], [], []
    # Fresh parameters per call: small random weights, zero biases.
    w1 = torch.normal(0, 0.01, size=(num_inputs, num_hidden), requires_grad=True)
    b1 = torch.zeros(num_hidden, requires_grad=True)
    w2 = torch.normal(0, 0.01, size=(num_hidden, num_outputs), requires_grad=True)
    b2 = torch.zeros(num_outputs, requires_grad=True)

    def _forward(x):
        # Linear -> ReLU -> Linear -> sigmoid; returns (batch, 1) probabilities.
        hidden = relu(torch.mm(x, w1) + b1)
        return sigmoid(torch.mm(hidden, w2) + b2)

    for epoch in range(num_epochs):
        train_loss_sum, train_num, train_acc = 0, 0, 0
        test_loss_sum, test_num, test_acc = 0, 0, 0

        for x, y in data_iter(batch_size, train_features, train_labels, True):
            output = _forward(x)
            loss = crossloss(output, y).sum()
            train_loss_sum += loss.item()
            # Threshold probabilities at 0.5 for the predicted class.
            train_acc += (output.squeeze(-1).gt(0.5) == y).sum().item()
            train_num += x.shape[0]
            loss.backward()
            sgd([w1, b1, w2, b2], lr, batch_size)
        train_ls.append(train_loss_sum / train_num)
        x_epoch.append(epoch + 1)

        # Evaluation needs no gradients; no_grad() skips building the
        # autograd graph for every test batch (memory/compute waste).
        with torch.no_grad():
            for x, y in data_iter(batch_size, test_features, test_labels, False):
                output = _forward(x)
                test_loss_sum += crossloss(output, y).sum().item()
                test_acc += (output.squeeze(-1).gt(0.5) == y).sum().item()
                test_num += x.shape[0]
        test_ls.append(test_loss_sum / test_num)
        list_train_acc.append(train_acc / train_num)
        list_test_acc.append(test_acc / test_num)

    return train_ls, test_ls, list_train_acc, list_test_acc
# train(batch_size, num_epochs, lr, train_features, train_labels, test_features, test_labels)

def get_k_fold_data(k, i, X, y):
    """Split (X, y) into k equal folds; fold i is validation, the rest train.

    Returns (X_train, y_train, X_valid, y_valid). Rows beyond
    k * (X.shape[0] // k) are dropped by the integer division.
    """
    assert k > 1
    fold_size = X.shape[0] // k  # floor division: each fold gets this many rows
    train_X_parts, train_y_parts = [], []
    X_valid, y_valid = None, None
    for fold in range(k):
        rows = slice(fold * fold_size, (fold + 1) * fold_size)
        if fold == i:
            X_valid, y_valid = X[rows, :], y[rows]
        else:
            train_X_parts.append(X[rows, :])
            train_y_parts.append(y[rows])
    # Concatenate the non-validation folds, preserving their original order.
    X_train = torch.cat(train_X_parts, dim=0)
    y_train = torch.cat(train_y_parts, dim=0)
    return X_train, y_train, X_valid, y_valid

def k_fold(k, X_train, y_train, num_epochs, batch_size):
    """Run k-fold cross-validation, printing per-fold metrics averaged
    over epochs.

    NOTE(review): the learning rate `lr` is read from module scope rather
    than passed as a parameter — confirm this is intentional before reuse.
    """
    for i in range(k):
        # Fold i is validation; the remaining k-1 folds form the training set.
        data = get_k_fold_data(k, i, X_train, y_train)
        train_ls, valid_ls, train_acc, valid_acc = train(batch_size, num_epochs, lr, *data)
        # Average each per-epoch metric over the epochs of this fold.
        train_l_sum = sum(train_ls)
        valid_l_sum = sum(valid_ls)
        train_acc_sum = sum(train_acc)
        valid_acc_sum = sum(valid_acc)
        print('fold %d, train loss %.4f, valid loss %.4f, train acc %.4f, test acc %.4f' % (i + 1, train_l_sum / num_epochs, valid_l_sum / num_epochs, train_acc_sum / num_epochs, valid_acc_sum / num_epochs))

# Hyperparameters for the cross-validation run.
batch_size = 256
num_epochs = 10
lr = 0.1      # learning rate; read as a global by k_fold()
fold = 10     # number of cross-validation folds
# NOTE(review): runs at import time — consider an `if __name__ == "__main__":`
# guard if this module is ever imported elsewhere.
k_fold(fold, train_features, train_labels, num_epochs, batch_size)



