import torch as pt
import numpy as np
import matplotlib.pyplot as plt

# Reproducibility: pin the RNG seeds of both libraries.
np.random.seed(777)
pt.manual_seed(777)

# Hyperparameters: Adam learning rate and number of training epochs.
ALPHA = 0.001
N_EPOCHS = 2000

# 1. Multi-class classification with PyTorch
# (1) Data preparation
# (i) Load the data below
# 8 samples x 4 features, with one-hot labels over 3 classes.
x_data = [[1, 2, 1, 1], [2, 1, 3, 2], [3, 1, 3, 4], [4, 1, 5, 5],
          [1, 7, 5, 5], [1, 2, 5, 6], [1, 6, 6, 6], [1, 7, 7, 7]]
y_data = [[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 1, 0],
          [0, 1, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]]
x = pt.tensor(x_data, dtype=pt.float32)
y = pt.tensor(y_data, dtype=pt.float32)

# (ii) Build the multi-class model: 4 features -> 20 hidden units -> 3 classes.
# Softmax needs an explicit dim: dim=1 normalizes each sample's 3 logits into
# a probability distribution over the classes. (Omitting dim is deprecated
# and triggers a UserWarning; the result here is the same, but explicit is
# both warning-free and unambiguous.)
model = pt.nn.Sequential(
    pt.nn.Linear(4, 20),
    pt.nn.Sigmoid(),
    pt.nn.Linear(20, 3),
    pt.nn.Softmax(dim=1)
)
# Adam optimizer over all model parameters with learning rate ALPHA.
optim = pt.optim.Adam(params=model.parameters(), lr=ALPHA)


def acc(h, y):
    """Return the fraction of rows where the predicted class (argmax of the
    model output ``h``) matches the one-hot label in ``y``."""
    predicted = h.argmax(dim=1)
    actual = y.argmax(dim=1)
    hits = (predicted == actual).float()
    return hits.mean()


# (iii) Gradient-descent training loop.
cost_history = np.zeros(N_EPOCHS)
for step in range(N_EPOCHS):
    model.train(True)
    # (iv) Multi-class cross-entropy implemented from scratch:
    # loss = mean over samples of -sum_k y_k * log(h_k).
    h = model(x)
    criterion = - (y * pt.log(h)).sum(dim=1).mean()
    # BUGFIX: clear the gradients accumulated by the previous iteration.
    # Without zero_grad(), .backward() keeps ADDING to each parameter's
    # .grad, so every optimizer step uses the running sum of all past
    # gradients instead of the current one.
    optim.zero_grad()
    criterion.backward()
    optim.step()
    model.train(False)
    cost = criterion.item()
    cost_history[step] = cost
    # (v) Print the cost every 200 epochs.
    if step % 200 == 0:
        accv = acc(h, y).item()
        print(f'#{step + 1}: cost = {cost}, acc = {accv}')
# Also report the final epoch if the loop above didn't just print it.
if step % 200 != 0:
    accv = acc(h, y).item()
    print(f'#{step + 1}: cost = {cost}, acc = {accv}')

# (vi) Predict on [[1, 11, 7, 9], [1, 3, 4, 3], [1, 1, 0, 1]].
print('使用[[1, 11, 7, 9], [1, 3, 4, 3], [1, 1, 0, 1]]打印预测结果')
test = pt.tensor([[1, 11, 7, 9], [1, 3, 4, 3], [1, 1, 0, 1]]).float()
# Inference only: no_grad() skips building the autograd graph, saving
# memory and time for a pure forward pass.
with pt.no_grad():
    h = model(test)
h = h.argmax(dim=1)  # predicted class index per row
print(h)

# (vii) Accuracy on the training data (last value computed in the loop).
print(f'上面数据的准确率: {accv}')

# (viii) Plot the loss-function history.
plt.plot(cost_history)
plt.title('Cost history')

# (ix) Model accuracy.
# NOTE(review): this reprints the same training accuracy as (vii); no
# separate metric is computed here.
print(f'模型准确率: {accv}')

# (x) Model weights and biases.
print('打印模型权重和截距')
print(list(model.parameters()))

# Finally show all plots.
plt.show()
