import numpy as np
from torch import nn
from torch.autograd import Variable
import torch

# Load the dataset: column 0 is the binary label, the remaining columns are
# the features. dtype=np.float32 keeps everything float, which nn.Linear needs.
data = np.loadtxt(r'HeightWeight.csv', dtype=np.float32, delimiter=',', skiprows=1)
x = data[:, 1:]   # feature columns (presumably height/weight — TODO confirm CSV layout)
y = data[:, :1]   # label column, kept 2-D as shape (N, 1)

# Linear problem: with multiple features the inputs must be standardized so
# gradient descent treats each feature on a comparable scale.
x = (x - x.mean(axis=0)) / x.std(axis=0)

# Range of the first standardized feature; used later to draw the decision boundary.
x1_min = x[:, 0].min()
x1_max = x[:, 0].max()

print(x1_min)

# torch.autograd.Variable has been deprecated since PyTorch 0.4: plain tensors
# already participate in autograd. from_numpy preserves the float32 dtype.
x = torch.from_numpy(x)
y = torch.from_numpy(y)

# Logistic-regression model: a single linear layer mapping the 2 features to
# one logit, followed by a sigmoid that squashes it into a probability.
# The linear layer is kept as a named variable so its weights can be read
# out later for plotting the decision boundary.
z = nn.Linear(2, 1, bias=True)
h = nn.Sigmoid()
model = nn.Sequential(z, h)

# Adam optimizer over every trainable parameter of the model.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# Train with binary cross-entropy. The built-in functional BCE clamps the
# log() terms internally, so it cannot produce NaN the way the hand-rolled
# -[y*log(p) + (1-y)*log(1-p)] does when the sigmoid saturates to exactly 0 or 1.
for step in range(10001):
    optimizer.zero_grad()       # clear gradients accumulated by the previous backward()
    hypothesis = model(x)       # predicted probabilities in (0, 1)
    cost = torch.nn.functional.binary_cross_entropy(hypothesis, y)
    cost.backward()             # accumulate gradients into the parameters
    optimizer.step()

    if step % 1000 == 0:
        print(step, cost.item())   # .item() is the modern scalar accessor

# Evaluate: threshold the predicted probabilities at 0.5 and compare to labels.
# no_grad() skips graph construction during pure inference.
with torch.no_grad():
    predicted = (model(x) > 0.5).float()
accuracy = (predicted == y).float().mean().item()
# The original computed accuracy but never reported it (the print was
# commented out); report it so the evaluation is actually visible.
print("Accuracy:", accuracy)

# Extract the learned parameters of the linear layer z for plotting the
# decision boundary  w1*x1 + w2*x2 + b = 0.
w = z.weight.data.numpy()
b = z.bias.data.numpy()
print(w)
w1, w2 = w[:, 0], w[:, 1]

import matplotlib.pyplot as plt

# Scatter the (standardized) samples, colored by class: red for label 0,
# blue otherwise.
for i in range(len(x)):
    color = 'r' if y[i] == 0 else 'b'
    plt.scatter(x[i, 0], x[i, 1], c=color)

# Decision boundary: w1*x1 + w2*x2 + b = 0  =>  x2 = -(w1*x1 + b) / w2,
# evaluated at the two extremes of the first feature.
x2_min = - (w1 * x1_min + b) / w2
x2_max = - (w1 * x1_max + b) / w2
# BUG FIX: plt.plot takes (x-values, y-values). The original passed
# ([x1_min, x2_min], [x1_max, x2_max]), mixing x- and y-coordinates and
# drawing the wrong line entirely.
plt.plot([x1_min, x1_max], [x2_min, x2_max])
plt.show()