import numpy as np
import matplotlib.pyplot as plt

def boundary(x, w):
    """Return the x2 coordinate of the decision line w0 + w1*x + w2*x2 = 0.

    Assumes a 3-element weight vector ``w`` = [bias, w1, w2].
    """
    intercept, slope = w[0], w[1]
    return -(intercept + slope * x) / w[2]

def sigmoid(x):
    """Numerically stable logistic function, 1 / (1 + exp(-x)).

    The naive form overflows in ``np.exp(-x)`` for large negative ``x``.
    Using exp(-|x|), which is always in (0, 1], avoids overflow entirely:
        x >= 0:  1 / (1 + e^-x)
        x <  0:  e^x / (1 + e^x) = e^-|x| / (1 + e^-|x|)
    """
    x = np.asarray(x, dtype=float)
    e = np.exp(-np.abs(x))  # <= 1, never overflows
    return np.where(x >= 0, 1.0 / (1.0 + e), e / (1.0 + e))

def prediction(x, w):
    """Return hard 0/1 class labels for inputs ``x`` under weights ``w``."""
    scores = np.dot(x, w)
    return np.round(sigmoid(scores))

class NN:
    """Logistic-regression classifier trained with mini-batch SGD.

    Despite the name, this is a single-layer model: p = sigmoid(X @ w).
    There is no explicit bias term; include a constant column in ``X``
    if an intercept is wanted.
    """

    def __init__(self, X, y, batch_size=32, learning_rate=0.01, num_epochs=100):
        """
        Args:
            X: (num_samples, num_features) feature matrix.
            y: (num_samples,) binary labels in {0, 1}.
            batch_size: mini-batch size for SGD.
            learning_rate: step size for each weight update.
            num_epochs: number of passes over the full data set.
        """
        self.X = X
        self.y = y
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        self.num_samples, self.num_features = X.shape
        # Random (unseeded) initialization -- training is not reproducible
        # unless the caller seeds numpy first.
        self.weights = np.random.randn(self.num_features)

    @staticmethod
    def _sigmoid(z):
        # Numerically stable logistic: exp(-|z|) <= 1, so it never
        # overflows, unlike the naive 1 / (1 + exp(-z)) for large -z.
        e = np.exp(-np.abs(z))
        return np.where(z >= 0, 1.0 / (1.0 + e), e / (1.0 + e))

    def calculate_loss_gradient(self, x, y):
        """Per-sample errors and MEAN cross-entropy gradient for one batch.

        The gradient is averaged over the batch (not summed) so the update
        magnitude is independent of batch size; otherwise the final,
        possibly smaller, batch would use a different effective learning
        rate per sample than the full batches.
        """
        pred = self._sigmoid(np.dot(x, self.weights))
        error = pred - y
        grad = np.dot(x.T, error) / x.shape[0]
        return error, grad

    def loss(self):
        """Return (accuracy, mean cross-entropy loss) over the full data set."""
        pred = self._sigmoid(np.dot(self.X, self.weights))
        accuracy = np.mean(np.round(pred) == self.y)
        # Clip probabilities away from {0, 1} so log() never sees 0
        # (a confident, saturated model would otherwise produce -inf/nan).
        eps = 1e-12
        p = np.clip(pred, eps, 1.0 - eps)
        loss = -np.mean(self.y * np.log(p) + (1 - self.y) * np.log(1 - p))
        return accuracy, loss

    def train(self):
        """Run mini-batch SGD for ``num_epochs`` epochs.

        Returns:
            (losses, accuracies): per-epoch full-data-set metrics.
        """
        losses = []
        accuracies = []
        for epoch in range(self.num_epochs):
            # Fresh shuffle each epoch so batches differ between passes.
            indices = np.random.permutation(self.num_samples)
            for start in range(0, self.num_samples, self.batch_size):
                batch_indices = indices[start:start + self.batch_size]
                _, grad = self.calculate_loss_gradient(
                    self.X[batch_indices], self.y[batch_indices])
                self.weights -= self.learning_rate * grad

            accuracy, loss = self.loss()
            losses.append(loss)
            accuracies.append(accuracy)

            if epoch % 10 == 0:
                print(f"Epoch {epoch}: Loss={loss}, Accuracy={accuracy}")

        return losses, accuracies

# Load the data set: comma-separated rows, last column is the 0/1 label.
data = np.genfromtxt('nn_data/data1.txt', delimiter=',')
X = data[:, :-1]
y = data[:, -1]

# Build the model and train with the default hyperparameters.
model = NN(X, y)
losses, accuracies = model.train()

# Report the learned weights.
print("Final weights:")
print(model.weights)

# Plot per-epoch training curves: loss (left) and accuracy (right).
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.plot(losses)
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')

plt.subplot(1, 2, 2)
plt.plot(accuracies)
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')

plt.tight_layout()
plt.show()

# Plot the data and the learned decision boundary.
# NOTE(review): boundary() indexes w[2], i.e. it expects 3 weights
# [bias, w1, w2], while the scatter uses X[:, 0] and X[:, 1] as the two
# features. This only works if data1.txt supplies a bias column (e.g. a
# leading column of ones) in addition to the two plotted features --
# confirm against the data file, otherwise w[2] raises IndexError.
plt.scatter(X[:, 0], X[:, 1], c=y)
x_boundary = np.linspace(np.min(X[:, 0]), np.max(X[:, 0]), 100)
y_boundary = boundary(x_boundary, model.weights)
plt.plot(x_boundary, y_boundary, 'r')
plt.title('Decision Boundary')
plt.xlabel('Score 1')
plt.ylabel('Score 2')
plt.show()
