import torch as t
import torch.functional as F
import torch.nn as nn
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
import random
from tqdm import tqdm

# Learning rate for SGD (original variable name preserved for compatibility).
learnning = 0.05

# The four XOR cases: inputs with differing bits map to 1, equal bits to 0.
A1 = [1, 0]
A2 = [0, 1]
B1 = [1, 1]
B2 = [0, 0]
A_t = [1]
B_t = [0]

# Replicate each XOR case 100 times -> 400 training samples in total.
datas = []
datas_t = []
for _ in range(100):
    for sample, label in ((A1, A_t), (A2, A_t), (B1, B_t), (B2, B_t)):
        datas.append(sample)
        datas_t.append(label)

# Convert to np.array so mini-batches can be gathered via fancy indexing.
datas = np.array(datas)
datas_t = np.array(datas_t)
print(len(datas), len(datas_t), '----')

# Model: a minimal 2-3-1 sigmoid MLP — the smallest net that can learn XOR.
_layers = [
    nn.Linear(2, 3),
    nn.Sigmoid(),
    nn.Linear(3, 1),
    nn.Sigmoid(),
]
model = nn.Sequential(*_layers)

# Plain stochastic gradient descent over all model parameters.
optim = t.optim.SGD(model.parameters(), lr=learnning)
batch_size = 32

loss_values = []  # mean training loss, sampled every 50 epochs
acc_values = []   # accuracy proxy, sampled every 50 epochs

center_loss = []  # per-batch losses accumulated since the last report
for epoch in tqdm(range(3000)):
    # ---- training ----
    model.train()
    for _ in range(len(datas) // batch_size):
        # Sample a mini-batch with replacement.
        indexes = [random.randint(0, len(datas) - 1) for _ in range(batch_size)]
        x = t.from_numpy(datas[indexes]).float()
        target = t.from_numpy(datas_t[indexes]).float()
        yt = model(x)
        # Mean absolute error. The original sqrt(diff**2) has the same value,
        # but sqrt's gradient is NaN where diff == 0; t.abs is safe there.
        loss = t.mean(t.abs(target - yt))
        # Clear stale gradients before backprop; the optimizer owns all model
        # parameters, so a single zero_grad call suffices.
        optim.zero_grad()
        loss.backward()
        optim.step()
        center_loss.append(loss.item())  # record the batch loss

    # ---- evaluation every 50 epochs ----
    if epoch % 50 == 0:
        model.eval()
        # Mean training loss since the previous report.
        loss_values.append(np.array(center_loss).mean())
        center_loss = []
        # Accuracy proxy: 1 - |target - prediction|, averaged over the four
        # XOR cases. Gradients are not needed during evaluation.
        acc = 0
        with t.no_grad():
            for inp, tgt in (([1, 0], [1]), ([0, 1], [1]),
                             ([0, 0], [0]), ([1, 1], [0])):
                x = t.from_numpy(np.array([inp])).float()
                y = t.from_numpy(np.array([tgt])).float()
                acc += 1 - t.abs(y - model(x))
        acc_values.append((acc / 4).item())
        
# Plot the recorded curves: loss in red, accuracy proxy in green. Both share
# the same x axis (one point per 50-epoch report).
x = list(range(len(loss_values)))
plt.scatter(x, loss_values, c='r')
plt.scatter(x, acc_values, c='g')
plt.title('red is loss, green is acc!')
plt.show()
