import numpy as np
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt

# Load the housing data as strings first so the header row can be split off,
# then convert the remaining rows to floats.
lines = np.loadtxt('../data/USA_Housing.csv', delimiter=',', dtype='str')
header = lines[0]
lines = lines[1:].astype(float)

# Train/test split (80/20) after a random row shuffle.
# NOTE(review): this permutation runs before np.random.seed(42) is set later
# in the script, so the split itself is not reproducible across runs.
ratio = 0.8
split = int(len(lines) * ratio)
lines = np.random.permutation(lines)
train, test = lines[:split], lines[split:]

# Standardize to zero mean / unit variance, e.g. 1 2 3 4 5 (mean 3)
# becomes -1.414 -0.707 0 0.707 1.414.
# The scaler is fit on the training set only and applied to both sets;
# note it also standardizes the target column (the last one).
scaler = StandardScaler()
scaler.fit(train)
train = scaler.transform(train)
test = scaler.transform(test)

# Features are all columns except the last; the target is the last column.
x_train, y_train = train[:, :-1], train[:, -1]
x_test, y_test = test[:, :-1], test[:, -1]


# 取每一次的样本
def batch_generator(x, y, batch_size, shuffle=True):
    """Yield successive (x, y) mini-batches of ``batch_size`` samples.

    When ``shuffle`` is true the samples are permuted (jointly, so x/y
    pairs stay aligned) before batching.  The final batch may be smaller
    than ``batch_size`` when len(x) is not an exact multiple of it.
    """
    if shuffle:
        order = np.random.permutation(len(x))
        x, y = x[order], y[order]
    total = len(x)
    for start in range(0, total, batch_size):
        stop = min(start + batch_size, total)
        yield x[start:stop], y[start:stop]


# 实现随机梯度下降
# Mini-batch stochastic gradient descent for linear regression.
def SGD(num_epochs, learning_rate, batch_size):
    """Train a linear model on the module-level train/test split.

    Parameters
    ----------
    num_epochs : int
        Number of full passes over the training set.
    learning_rate : float
        Step size applied to each mini-batch gradient.
    batch_size : int
        Mini-batch size handed to ``batch_generator``.

    Returns
    -------
    (train_losses, test_losses, theta) : per-epoch RMSE lists and the
        learned weight vector (bias term last).
    """
    # Append a constant-1 column so the bias is learned as part of theta.
    X = np.concatenate([x_train, np.ones((len(x_train), 1))], axis=-1)
    X_test = np.concatenate([x_test, np.ones((len(x_test), 1))], axis=-1)
    theta = np.random.normal(size=X.shape[1])

    train_losses = []
    test_losses = []
    for _ in range(num_epochs):
        batch = batch_generator(X, y_train, batch_size=batch_size)
        train_loss = 0.0
        for x, y in batch:
            grad = x.T @ (x @ theta - y)
            # Average the gradient over the ACTUAL batch length, not the
            # nominal batch_size: the last batch may be smaller, and
            # dividing by batch_size there under-scales its update.
            theta = theta - learning_rate * grad / len(x)
            # Loss is accumulated with the freshly updated theta, so the
            # reported training RMSE slightly trails the parameters
            # (kept from the original implementation).
            train_loss += np.square(x @ theta - y).sum()
        train_losses.append(np.sqrt(train_loss / len(X)))
        test_losses.append(np.sqrt(np.square(X_test @ theta - y_test).mean()))
    return train_losses, test_losses, theta


# Hyper-parameters for the main training run.
num_epochs = 30
learning_rate = 0.01
batch_size = 32

# Seed just before training so weight init and batch shuffling repeat.
np.random.seed(42)
train_losses, test_losses, theta = SGD(num_epochs, learning_rate, batch_size)

# Plot per-epoch training vs test RMSE.
epochs = range(1, num_epochs + 1)
plt.figure(figsize=(6, 4))
plt.plot(epochs, train_losses, label='Train RMSE')
plt.plot(epochs, test_losses, label='Test  RMSE')
plt.xlabel('Epoch')
plt.ylabel('RMSE')
plt.title('Train vs Test RMSE per Epoch')
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()


# How the learning rate affects convergence (compare test-RMSE curves).
_, loss1, _ = SGD(num_epochs, learning_rate=0.001, batch_size=32)
_, loss2, _ = SGD(num_epochs, learning_rate=0.01, batch_size=32)
_, loss3, _ = SGD(num_epochs, learning_rate=0.1, batch_size=32)

plt.figure(figsize=(6, 4))
for curve, color, label in (
    (loss1, 'tab:blue', 'lr=0.001'),
    (loss2, 'tab:orange', 'lr=0.01'),
    (loss3, 'tab:red', 'lr=0.1'),
):
    plt.plot(range(1, num_epochs + 1), curve, color=color, label=label)

plt.xlabel('Epoch')
plt.ylabel('RMSE')
plt.title('Effect of Learning Rate on RMSE')
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()
