import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from sklearn.preprocessing import StandardScaler

# Use a raw string so the Windows backslashes are taken literally.
# The original mixed "\\" with bare "\A"/"\D" escapes, which only works
# because Python keeps unrecognized escapes (now a SyntaxWarning, and a
# "\U..." segment would be a hard SyntaxError).  Same runtime value.
path_house_price = r"C:\Users\ASUS\Desktop\机器学习\数据集\USA_Housing.csv"
# Load everything as strings first so the header row can be peeled off,
# then convert the remaining data rows to floats.
lines = np.loadtxt(path_house_price, delimiter=',', dtype='str')
header = lines[0]
lines = lines[1:].astype(float)
print('数据特征：', ', '.join(header[:-1]))
print('数据标签：', header[-1])
print('数据总条数：', len(lines))

# Split into training and test sets (80/20, keeping file order).
ratio = 0.8
split = int(len(lines) * ratio)
train, test = lines[:split], lines[split:]

# Standardize features and label jointly; the scaler is fitted on the
# training split only so no test-set statistics leak into training.
scaler = StandardScaler()
scaler.fit(train)
train = scaler.transform(train)
test = scaler.transform(test)

# Separate feature columns from the label (last column) for each split.
x_train, y_train = train[:, :-1], train[:, -1].flatten()
x_test, y_test = test[:, :-1], test[:, -1].flatten()

# Append a column of ones so the last coefficient acts as the intercept.
X = np.concatenate([x_train, np.ones((len(x_train), 1))], axis=-1)
# Solve the normal equations (X^T X) theta = X^T y directly instead of
# forming the explicit inverse -- numerically more stable and cheaper
# than np.linalg.inv(X.T @ X) @ X.T @ y_train.
theta = np.linalg.solve(X.T @ X, X.T @ y_train)
print('回归系数:', theta)

# Evaluate the closed-form solution on the held-out test split:
# append the same bias column of ones, then predict via X @ theta.
ones_col = np.ones((x_test.shape[0], 1))
X_test = np.concatenate([x_test, ones_col], axis=-1)
y_pred = X_test @ theta

# Root-mean-square error between predictions and true labels.
residual = y_test - y_pred
rmse_loss = np.sqrt(np.mean(np.square(residual)))
print('RMSE:', rmse_loss)



# ----------------------------------------------------------------------
# Baseline: train and evaluate scikit-learn's built-in linear regression.
# ----------------------------------------------------------------------
from sklearn.linear_model import LinearRegression

linreg = LinearRegression()
# The model fits its own intercept, so no bias column is needed here.
linreg.fit(x_train, y_train)

# coef_ holds the weights; intercept_ is the constant term.
print('回归系数:', linreg.coef_, linreg.intercept_)
y_pred = linreg.predict(x_test)

# Root-mean-square error on the test split, for comparison with the
# closed-form solution above.
rmse_loss = np.sqrt(np.mean(np.square(y_test - y_pred)))
print('RMSE:', rmse_loss)


def batch_generator(x, y, batch_size, shuffle=True):
    """Yield successive (x, y) mini-batches of size ``batch_size``.

    The final batch may be smaller when ``len(x)`` is not an exact
    multiple of ``batch_size``.  With ``shuffle=True`` the samples are
    permuted once (one draw from numpy's global RNG) before batching.
    """
    n = len(x)
    if shuffle:
        order = np.random.permutation(n)
        x, y = x[order], y[order]
    for start in range(0, n, batch_size):
        stop = min(start + batch_size, n)
        yield x[start:stop], y[start:stop]

def SGD(num_epoch, learning_rate, batch_size,
        x_tr=None, y_tr=None, x_te=None, y_te=None):
    """Fit linear regression with mini-batch stochastic gradient descent.

    Parameters
    ----------
    num_epoch : int
        Number of full passes over the training data.
    learning_rate : float
        Step size for each gradient update.
    batch_size : int
        Mini-batch size fed to ``batch_generator``.
    x_tr, y_tr, x_te, y_te : array-like, optional
        Training/test features and labels.  When omitted the function
        falls back to the module-level ``x_train``/``y_train``/
        ``x_test``/``y_test`` split, preserving the original call
        signature ``SGD(num_epoch, learning_rate, batch_size)``.

    Returns
    -------
    (theta, train_losses, test_losses)
        Final parameter vector (bias last) and per-epoch RMSE lists.
    """
    if x_tr is None:
        x_tr, y_tr = x_train, y_train
    if x_te is None:
        x_te, y_te = x_test, y_test

    # Append a bias column of ones so the last weight is the intercept.
    X = np.concatenate([x_tr, np.ones((len(x_tr), 1))], axis=-1)
    X_test = np.concatenate([x_te, np.ones((len(x_te), 1))], axis=-1)
    # Random initialization: one weight per column, including the bias.
    theta = np.random.normal(size=X.shape[1])

    train_losses = []
    test_losses = []
    for i in range(num_epoch):
        batch_g = batch_generator(X, y_tr, batch_size, shuffle=True)
        train_loss = 0
        for x_batch, y_batch in batch_g:
            # Gradient of the squared-error loss on this mini-batch.
            grad = x_batch.T @ (x_batch @ theta - y_batch)
            # Average the gradient over the batch before stepping.
            theta = theta - learning_rate * grad / len(x_batch)
            # Accumulate squared error (measured after the update).
            train_loss += np.square(x_batch @ theta - y_batch).sum()
        # Per-epoch RMSE on the training batches and on the test set.
        train_loss = np.sqrt(train_loss / len(X))
        train_losses.append(train_loss)
        test_loss = np.sqrt(np.square(X_test @ theta - y_te).mean())
        test_losses.append(test_loss)

    print('回归系数:', theta)
    return theta, train_losses, test_losses


# Hyper-parameters: number of epochs, step size, and mini-batch size.
num_epoch = 20
learning_rate = 0.01
batch_size = 32
# Fix the RNG seed so shuffling and initialization are reproducible.
np.random.seed(0)

_, train_losses, test_losses = SGD(num_epoch, learning_rate, batch_size)

# Plot both loss curves against the epoch index: the loss first keeps
# falling, then levels off.
epochs = np.arange(num_epoch)
plt.plot(epochs, train_losses, color='blue',
    label='train loss')
plt.plot(epochs, test_losses, color='red',
    ls='--', label='test loss')
# Epochs are integers, so force integer ticks on the x-axis
# (this step is optional).
plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True))
plt.xlabel('Epoch')
plt.ylabel('RMSE')
plt.legend()
plt.show()

























