import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import torch.optim as optim
from torch.utils.data import TensorDataset

# Logistic-regression model: one linear layer maps the 30 input features
# to a single logit, and the sigmoid squashes it to a probability P(y=1).
model = nn.Sequential(
    nn.Linear(30, 1),
    nn.Sigmoid(),
)
print(model.state_dict())

# Plain SGD over all model parameters.
optimizer = optim.SGD(model.parameters(), lr=0.1)

"""
NLLLoss损失函数与CrossEntropyLoss损失函数的关系
https://www.cnblogs.com/booturbo/p/16491565.html

CrossEntropyLoss损失函数是Softmax + Log + NLLLoss
"""
criterion = nn.CrossEntropyLoss()

criterion2 = nn.NLLLoss()

# Load the training table; the "y" column holds the labels and the
# remaining columns are the model's 30 input features.
pd_data2 = pd.read_csv('breast_horz_promoter.csv')
print(model)

y = pd_data2["y"].values
X_train = pd_data2.drop(columns="y").values

# Wrap the arrays as float tensors and batch them (no shuffling, so the
# per-batch losses printed below are reproducible run to run).
train_df_x = torch.from_numpy(X_train).float()
train_df_y = torch.from_numpy(y).float()

train_dataset = TensorDataset(train_df_x, train_df_y)
train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=10,
    num_workers=0,
    shuffle=False)

# One training pass: the model emits P(y=1); we expand it to a two-column
# distribution [P(y=0), P(y=1)] so the two-class loss functions apply.
for _batch_idx, (data, labels) in enumerate(train_loader):
    optimizer.zero_grad()
    output = model(data)                        # shape (batch, 1): P(y=1)
    output1 = torch.cat([1 - output, output], 1)
    predict = torch.log(output1)                # log-probabilities

    # Demonstrate the Log + NLLLoss decomposition: NLLLoss takes
    # log-probabilities directly.  (Bug fix: the original passed them to
    # CrossEntropyLoss, which would apply softmax a second time, and left
    # criterion2 unused.)
    loss1 = criterion2(predict, labels.to(torch.long))
    print("loss1", loss1)

    # NOTE(review): output1 holds probabilities, not logits, so
    # CrossEntropyLoss applies an extra softmax here — loss and loss1 are
    # not expected to match exactly.
    loss = criterion(output1, labels.to(torch.long))
    print("loss2", loss)  # bug fix: previously printed loss1 under "loss2"

    loss.backward()
    optimizer.step()

# Report the learned parameters after training; index '0.weight' is the
# weight tensor of the first (Linear) layer in the Sequential model.
final_state = model.state_dict()
print(final_state)
print(final_state['0.weight'].numpy()[0])
