import torch
import pandas as pd

import numpy as np
import seaborn as sns
import random
import matplotlib.pyplot as plt

#================= Load data =================
def data_from_csv(path='../data/watermelon-3a.csv'):
    """Load the watermelon dataset, shuffle it, and return float32 tensors.

    Args:
        path: CSV file with 'density', 'sugar' and 'classification' columns
              (defaults to the original hard-coded location).

    Returns:
        x: (n, 2) float32 feature tensor (density, sugar).
        y: (n,) float32 label tensor.
        df: the raw DataFrame (kept for plotting later).
    """
    df = pd.read_csv(path)

    # Pull features/labels out as plain Python lists.
    features = df[['density', 'sugar']].values.tolist()
    labels = df['classification'].values.tolist()

    # Shuffle the samples (features and labels with the same permutation).
    order = list(range(len(features)))
    random.shuffle(order)

    # Convert to PyTorch tensors.
    x = torch.tensor([features[i] for i in order], dtype=torch.float32)
    y = torch.tensor([labels[i] for i in order], dtype=torch.float32)

    return x, y, df


#================= sigmoid =================
def sigmoid(z):
    """Logistic function 1 / (1 + e^-z), elementwise.

    Uses torch.sigmoid instead of the hand-rolled formula: the naive
    1 / (1 + torch.exp(-z)) computes an intermediate `inf` for large
    negative z, while the library implementation is numerically stable.
    """
    return torch.sigmoid(z)

#================= Linear model =================
def net(x, w, b):
    """Logistic-regression forward pass: sigmoid(x @ w + b).

    x is (n, d), w is (d, 1), b is a scalar tensor; returns (n, 1)
    probabilities in (0, 1).
    """
    return sigmoid(x @ w + b)


#==== Maximum likelihood estimation (cross-entropy) ====
def MSE(y_hat, y):
    """Per-sample binary cross-entropy (negative log-likelihood).

    NOTE(review): the name is a misnomer — this computes cross-entropy,
    not mean squared error — but it is kept so existing callers work.

    y_hat is clamped away from exactly 0/1 so that log() can never
    produce -inf/NaN when a prediction saturates.

    Args:
        y_hat: predicted probabilities in [0, 1].
        y: ground-truth labels (0 or 1), same broadcastable shape.

    Returns:
        Elementwise loss tensor (not reduced).
    """
    eps = 1e-7
    y_hat = torch.clamp(y_hat, eps, 1.0 - eps)
    return -(y * torch.log(y_hat) + (1 - y) * torch.log(1 - y_hat))


#================= Stochastic gradient descent =================
def SGD(params, lr):
    """One in-place SGD step: p <- p - lr * p.grad, then reset gradients.

    Runs under no_grad so the update itself is not tracked by autograd.
    """
    with torch.no_grad():
        for p in params:
            p.sub_(lr * p.grad)
            p.grad.zero_()


#============== Simple evaluation: classification accuracy =============
def accuracy(y_hat, y):
    """Fraction of samples whose 0.5-thresholded prediction matches the label.

    A prediction of exactly 0.5 is never counted as correct, matching the
    strict > / < comparisons.
    """
    hits = sum(
        1
        for pred, label in zip(y_hat, y)
        if (pred > 0.5 and label == 1) or (pred < 0.5 and label == 0)
    )
    return hits / len(y)


#==================== Training ========================
#Load the data
x, y, df = data_from_csv()

# Initialize parameters: w is (n_features, 1), b a scalar.
w = torch.normal(0, 0.01, size=(x.size(1), 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)

# Learning rate.
lr = 0.05
# Maximum number of training epochs.
num_epochs = 1000

loss_list = []


for epoch in range(num_epochs):
    # Forward pass; squeeze (n, 1) -> (n,) so it matches y's shape.
    y_hat = net(x, w, b).squeeze()

    # Negative log-likelihood (cross-entropy), summed over all samples.
    loss = MSE(y_hat, y).sum()
    # Record the loss for the curve plotted at the end.
    loss_list.append(loss.item())

    # Backpropagate to populate w.grad and b.grad.
    loss.backward()

    # Update the parameters with their gradients.
    SGD([w, b], lr)

    with torch.no_grad():
        # Evaluate the model. The squeeze() is essential: without it,
        # MSE broadcasts the (n, 1) predictions against the (n,) labels
        # into an (n, n) matrix, making train_l.mean() meaningless.
        y_hat_model = net(x, w, b).squeeze()
        train_l = MSE(y_hat_model, y)
        acc = accuracy(y_hat_model, y)

        # Format specs fixed from "{:4f}"/"{:3f}" (width, default precision)
        # to the intended "{:.4f}"/"{:.3f}" (fixed precision).
        print("epoch:{} loss:{:.4f} acc:{:.3f}".format(epoch + 1, train_l.mean(), acc))

        # Early stopping once training accuracy reaches 80%.
        if acc >= 0.8:
            print(acc)
            break

    
#Plotting
# Scatter plot of the raw data, colored by class.
sns.set()
sns.scatterplot(data=df, x='density', y='sugar', hue='classification')

# Extract the learned parameters as plain numpy values.
r_w = w.detach().numpy()
r_b = b.detach().numpy()
w0 = r_w[0][0]
w1 = r_w[1][0]
b0 = r_b[0]

# Decision boundary: w0*x + w1*y + b0 = 0  =>  y = (-w0*x - b0) / w1.
line_x = np.arange(0, 1, 0.01)
line_y = (-w0 * line_x - b0) / w1

# Format spec fixed from "{:3f}" (width 3) to "{:.3f}" (3 decimal places).
plt.title("w=[{:.3f},{:.3f}], b={:.3f}".format(w0, w1, b0))
plt.plot(line_x, line_y)

# Training-loss curve in a separate figure.
plt.figure()
plt.title("loss")
sns.lineplot(loss_list)
plt.show()

