import numpy as np
import torch as pt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Fix both RNG seeds so runs are reproducible: numpy drives sklearn's
# shuffling, torch drives the model's weight initialisation.
np.random.seed(777)
pt.manual_seed(777)

# 1. Binary classification with PyTorch
# (1) Data preparation
# ①  Load data-03-diabetes.csv; every column but the last is a feature,
#    the last column is the label (presumably 0/1 — the cost function
#    below assumes binary targets).
data = np.loadtxt('./data/data-03-diabetes.csv', delimiter=',')
x = data[:, :-1]
y = data[:, -1:]  # slice (not index) keeps y 2-D: shape (M, 1), matching the model output
M, N = x.shape
print('data count:', M, 'feature count:', N)
# Standardise features to zero mean / unit variance before the 80/20 split.
# NOTE(review): the scaler is fit on the full dataset, so test rows leak
# into the normalisation statistics — acceptable for a teaching script.
scaler = StandardScaler()
x = scaler.fit_transform(x)
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=777)

# ②  Wrap the numpy arrays as tensors. loadtxt yields float64, so these
#    are double tensors — the model is cast to double below to match.
x_train_t = pt.from_numpy(x_train)
y_train_t = pt.from_numpy(y_train)
x_test_t = pt.from_numpy(x_test)
y_test_t = pt.from_numpy(y_test)

# (2)	模型处理
# ①	创建逻辑回归模型
# (2) Model
# ①  Logistic regression = one linear layer squashed through a sigmoid.
#    The Linear ctor draws from the torch RNG seeded above.
layers = [
    pt.nn.Linear(N, 1, bias=True),
    pt.nn.Sigmoid(),
]
# Cast to float64 so the weights match the double-precision input tensors.
model = pt.nn.Sequential(*layers).double()

# ②  Plain SGD with a hand-tuned learning rate.
ALPHA = 0.001
optim = pt.optim.SGD(params=model.parameters(), lr=ALPHA)


# accuracy
def acc(ht, yt):
    """Return the fraction of predictions that match the labels.

    Both the model outputs ``ht`` (sigmoid probabilities) and the targets
    ``yt`` are thresholded at 0.5; the result is a 0-dim float64 tensor.
    """
    predicted = ht > 0.5
    actual = yt > 0.5
    return (predicted == actual).double().mean()


# ③	实现梯度下降过程
# ③  Gradient-descent training loop
N_EPOCH = 12000
# Guard band for the logs: if the sigmoid ever saturates to exactly 0 or 1,
# log() would yield -inf and backward() would propagate NaNs, silently
# killing training. Clamping to [EPS, 1-EPS] is the standard fix and is an
# identity (same value, same gradient) everywhere else.
EPS = 1e-12
cost_history = np.zeros(N_EPOCH)
for i in range(N_EPOCH):
    model.train()
    optim.zero_grad()
    ht = model(x_train_t)
    # ④  Binary cross-entropy written with low-level torch ops.
    ht_safe = ht.clamp(EPS, 1.0 - EPS)
    cost = -(y_train_t * pt.log(ht_safe) + (1 - y_train_t) * pt.log(1 - ht_safe)).mean()
    cost.backward()
    optim.step()
    model.eval()
    # Accuracy of the predictions made *before* this step's weight update.
    accv = acc(ht, y_train_t)
    # .item() extracts the Python float explicitly instead of relying on
    # numpy's implicit conversion of a grad-tracking 0-dim tensor.
    cost_history[i] = cost.item()
    # ⑤  Report every 200 epochs
    if i % 200 == 0:
        print(f'#{i + 1}: cost = {cost.item()}, acc = {accv.item()}')
# Also report the final epoch when the loop did not end on a reporting step.
if i % 200 != 0:
    print(f'#{i + 1}: cost = {cost.item()}, acc = {accv.item()}')

# ⑥	打印最终准确率
# ⑥  Final accuracy on the held-out test set
model.eval()
# Inference only: no_grad() skips building the autograd graph, so the
# forward pass is cheaper and the outputs need no detaching below.
with pt.no_grad():
    ht = model(x_test_t)
accv = acc(ht, y_test_t)
print(f'测试集最终准确率:{accv.item()}')

# ⑦  Plot the training-cost curve
plt.plot(cost_history)
plt.title('Cost History')
plt.xlabel('Iterations')
plt.ylabel('Cost')
plt.show()

# ⑧  Dump every test-set prediction next to its true label
print('打印所有预测结果')
print('真实值, 预测值, 是否正确')
result = (ht > 0.5).numpy()  # bool predictions; ht carries no grad under no_grad()
# bool-vs-float comparison works because True == 1.0 and False == 0.0
right = result == y_test
# Columns: true label, predicted label, correct?
table = np.c_[y_test, result, right]
print(table)
