import torch
import torch.nn as nn

import pandas as pd

import numpy as np
import seaborn as sns
import random
import matplotlib.pyplot as plt


def data_from_csv():
    """Load the watermelon dataset and return it in a random order.

    Reads ``./watermelon-3a.csv``, takes ``density`` and ``sugar`` as the
    two input features and ``classification`` as the binary label, then
    shuffles the samples (order is randomized so training does not depend
    on file order).

    Returns:
        x (torch.Tensor): float32 feature tensor of shape (N, 2).
        y (torch.Tensor): float32 label tensor of shape (N,).
        df (pd.DataFrame): the raw, unshuffled DataFrame (used for plotting).
    """
    df = pd.read_csv('./watermelon-3a.csv')

    # Vectorized column extraction instead of a per-row iterrows() loop.
    features = df[['density', 'sugar']].values.tolist()
    labels = df['classification'].values.tolist()

    # Shuffle indices, then gather features/labels in that shuffled order.
    indices = list(range(len(features)))
    random.shuffle(indices)
    i_features = [features[i] for i in indices]
    i_labels = [labels[i] for i in indices]

    # Convert to PyTorch tensors.
    x = torch.tensor(i_features).to(torch.float32)
    y = torch.tensor(i_labels).to(torch.float32)

    return x, y, df


def accuracy(y_hat, y):
    """Classification accuracy at a 0.5 decision threshold.

    A sample counts as correct when the predicted probability is > 0.5 and
    the label is 1, or the prediction is < 0.5 and the label is 0
    (a prediction of exactly 0.5 is never counted as correct).

    Args:
        y_hat: sequence of predicted probabilities in [0, 1].
        y: sequence of ground-truth labels (0 or 1), same length as y_hat.

    Returns:
        float: fraction of correctly classified samples.
    """
    correct_count = 0
    # zip pairs each prediction with its label directly (no index juggling).
    for pred, label in zip(y_hat, y):
        if pred > 0.5 and label == 1:
            correct_count += 1
        # BUG FIX: the original tested ``y_hat[i] == 0`` here, so correct
        # negative predictions were essentially never counted.
        elif pred < 0.5 and label == 0:
            correct_count += 1

    return correct_count / len(y_hat)



# ============================ step 1/5 Load the data ============================
# x: (N, 2) float32 features, y: (N,) float32 labels, df: raw DataFrame for plotting.
x, y, df = data_from_csv()


# ============================ step 2/5 Define the model =============================
class LR(nn.Module):
    """Logistic-regression model: one linear layer (2 -> 1) plus a sigmoid."""

    def __init__(self):
        super().__init__()
        # Attribute names are part of the public surface: the plotting code
        # reads ``features.weight`` and ``features.bias`` after training.
        self.features = nn.Linear(2, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Map a (batch, 2) feature tensor to (batch, 1) probabilities."""
        return self.sigmoid(self.features(x))


lr_net = LR()   # instantiate the logistic-regression model

# ============================ step 3/5 Choose the loss ============================
# Binary cross-entropy; expects probabilities in (0, 1), which the sigmoid provides.
loss_fn = nn.BCELoss()


# ============================ step 4/5 Choose the optimizer ============================
lr = 0.05  # learning rate
optimizer = torch.optim.SGD(lr_net.parameters(), lr=lr, momentum=0.9)


# ============================ step 5/5 Train the model ============================
for iteration in range(1000):

    # Forward pass.
    y_hat = lr_net(x).squeeze()

    # Compute the loss.
    loss = loss_fn(y_hat, y)

    # Backward pass.
    loss.backward()

    # Update parameters.
    optimizer.step()

    # Clear gradients so the next iteration starts fresh.
    optimizer.zero_grad()

    # NOTE: accuracy is measured on the predictions made *before* this
    # iteration's parameter update.
    acc = accuracy(y_hat, y)
    # Fixed format spec: "{:.3f}" (3 decimal places) — the original "{:3f}"
    # meant width-3 with default 6-decimal precision. loss.item() replaces
    # the dated loss.data.numpy() idiom for extracting a Python float.
    print("epoch:{} loss:{:.3f} acc:{:.3f}\n".format(iteration + 1, loss.item(), acc))
    # Early stop once training accuracy reaches 90%.
    if acc >= 0.9:
        break


#============================== Plot the result ============================
sns.set()
# Scatter the raw samples, colored by class label.
sns.scatterplot(data=df, x='density', y='sugar', hue='classification')

# Extract the learned weights (w0, w1) and bias b from the linear layer.
w0, w1 = lr_net.features.weight[0]
w0, w1 = float(w0.item()), float(w1.item())
b = float(lr_net.features.bias[0].item())

# Decision boundary: w0*x + w1*y + b = 0  =>  y = (-w0*x - b) / w1.
plot_x = np.arange(0, 1, 0.01)
plot_y = (-w0 * plot_x - b) / w1
plt.plot(plot_x, plot_y)
plt.show()

