import numpy as np
from sklearn.preprocessing import StandardScaler

# Windows path to the dataset. A raw string avoids the invalid escape
# sequences (\A, \D, \机, \数) the original mixed-backslash literal produced
# (SyntaxWarning today, a hard error in future Python); the resulting path
# value is byte-identical to the original's.
path_house_price = r"C:\Users\ASUS\Desktop\机器学习\数据集\USA_Housing.csv"
# Load the whole CSV as strings first so the header row can be split off.
lines = np.loadtxt(path_house_price, delimiter=',', dtype='str')
header = lines[0]
# Remaining rows are numeric: features in every column but the last,
# the label (house price) in the last column.
lines = lines[1:].astype(float)
print('数据特征：', ', '.join(header[:-1]))
print('数据标签：', header[-1])
print('数据总条数：', len(lines))

# Split into training and test sets (80% / 20%), preserving row order.
ratio = 0.8
split = int(len(lines) * ratio)
global_train, global_test = lines[:split], lines[split:]

k = 8
# NOTE(review): np.split requires len(global_train) to be divisible by k;
# with the 80% split above this holds for the 5000-row USA_Housing set,
# but would raise for other sizes — np.array_split would relax that.
data_sets = np.split(global_train, k, axis=0)


def _design(matrix):
    """Split a standardized (features+label) matrix into a design matrix
    with an appended bias column of ones, and a column-vector label."""
    features = np.hstack([matrix[:, :-1], np.ones((len(matrix), 1))])
    label = matrix[:, -1].reshape(-1, 1)
    return features, label


def _rmse(data, label, theta):
    """Root-mean-square error of the linear model `theta` on (data, label)."""
    return np.sqrt(np.mean(np.square(data @ theta - label)))


thetas = []
verify_losses = []
test_losses = []
for i in range(k):
    # Fold i is the validation set; the other k-1 folds form the training
    # set. Vectorized concatenation replaces the original row-by-row
    # Python append loop.
    train = np.concatenate(
        [data_sets[j] for j in range(k) if j != i], axis=0
    )
    verify = data_sets[i].copy()
    # Standardize features AND label with statistics from the training
    # folds only, so no information leaks from validation/test data.
    scaler = StandardScaler()
    scaler.fit(train)
    train = scaler.transform(train)
    verify = scaler.transform(verify)
    # Test-set standardization (redone per fold: the scaler changes).
    test = scaler.transform(global_test)

    data_train, label_train = _design(train)
    data_verify, label_verify = _design(verify)
    data_test, label_test = _design(test)

    # Closed-form least squares via the normal equations. np.linalg.solve
    # is faster and numerically better-conditioned than forming the
    # explicit inverse of X^T X and multiplying.
    theta = np.linalg.solve(data_train.T @ data_train,
                            data_train.T @ label_train)
    rmse_loss_verify = _rmse(data_verify, label_verify, theta)
    rmse_loss_test = _rmse(data_test, label_test, theta)

    # Collect per-fold results.
    thetas.append(theta)
    verify_losses.append(rmse_loss_verify)
    test_losses.append(rmse_loss_test)

# Gather the per-fold results into arrays for printing and comparison.
thetas, verify_losses, test_losses = (
    np.asarray(collected)
    for collected in (thetas, verify_losses, test_losses)
)
for collected in (thetas, verify_losses, test_losses):
    print(collected)

# The fold with the smallest validation loss does not necessarily have
# the smallest test loss.
print(np.argmin(verify_losses))
print(np.argmin(test_losses))