import torch
from torch import nn

# Build a synthetic dataset that follows a linear model: y = x @ w0.T + b0 + noise
sample_n = 1000000  # number of training samples (one million)
sample_t = 1000  # number of test samples
# Features are drawn uniformly from [-5.0, 5.0), two features per sample.
X = 10 * torch.rand([sample_n, 2]) - 5.0
X_test = 10 * torch.rand([sample_t, 2]) - 5.0
# Ground-truth weight row-vector (1, 2) and bias (1, 1).
w0 = torch.tensor([[2.0, -3.0]])
b0 = torch.tensor([[10.0]])
# Labels: matrix product with the true parameters plus Gaussian noise
# (mean 0.0, std 2.0) so the regression problem is not exactly solvable.
Y = X @ w0.t() + b0 + torch.normal(0.0, 2.0, size=[sample_n, 1])
Y_t = X_test @ w0.t() + b0 + torch.normal(0.0, 2.0, size=[sample_t, 1])
print("torch.cuda.is_available() = ", torch.cuda.is_available())

# Move the training tensors to the GPU when one is available, otherwise keep
# them on the CPU.  The original unconditional .cuda() calls crashed on
# CPU-only machines even though availability is checked just above.
_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
data = X.to(_device)
label = Y.to(_device)
# ---检查数据是否移到GPU上啦---
print("X.device:", data.device)
print("Y.device:", label.device)


# 定义模型
class LinearRegression(nn.Module):
    """Minimal linear model y = x @ w.T + b with explicitly managed parameters.

    ``w`` has shape (1, 2) and ``b`` shape (1, 1), matching the ground-truth
    ``w0``/``b0`` used to generate the data.  Shapes are written out directly
    instead of ``rand_like(w0)`` so the class no longer depends on module-level
    globals existing at construction time.
    """

    def __init__(self):
        super().__init__()
        # Random uniform init in [0, 1); same distribution/shape as the
        # original torch.rand_like(w0) / torch.rand_like(b0).
        self.w = nn.Parameter(torch.rand(1, 2))
        self.b = nn.Parameter(torch.rand(1, 1))

    def forward(self, x):
        """Forward pass: x of shape (batch, 2) -> predictions of shape (batch, 1)."""
        return x @ self.w.t() + self.b


# 训练过程
epochs = 100  # number of training epochs
losses = []  # per-epoch training loss, appended to by train()


def train():
    """Fit LinearRegression to the module-level (data, label) tensors with Adam.

    Side effects: appends each epoch's loss to the module-level ``losses``
    list, prints progress every 10 epochs, and pickles the trained model to
    ./linear_parameter.pth.
    """
    import time
    tic = time.time()
    # Build the model.
    linear = LinearRegression()
    # Train on the GPU when available, otherwise on the CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    linear.to(device)
    # Make sure the inputs live on the same device as the parameters;
    # .to() is a no-op when they already match.  The original used the
    # globals directly and would crash on any device mismatch.
    x = data.to(device)
    y = label.to(device)

    # Optimizer and loss function.
    optimizer = torch.optim.Adam(linear.parameters(), lr=0.01)
    loss_func = nn.MSELoss()
    for epoch in range(epochs):
        optimizer.zero_grad()
        y_pred = linear(x)
        loss = loss_func(y_pred, y)
        loss.backward()
        optimizer.step()
        # Record the loss curve.
        losses.append(loss.item())

        if epoch % 10 == 0:
            print({"epoch": epoch, "loss": loss.item()})
    # NOTE: saves the whole module via pickle (the commented-out test()
    # below loads it with weights_only=False); state_dict would be safer.
    torch.save(linear, "./linear_parameter.pth")
    toc = time.time()
    print("time used:", toc - tic)


train()

# Prepare held-out data for evaluating the trained model.
# Use a conditional device transfer instead of an unconditional .cuda(),
# which would crash on CPU-only machines.
_test_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
data_t = X_test.to(_test_device)
label_t = Y_t.to(_test_device)


# def test():
#     import time
#     tic = time.time()
#     loss_func = nn.MSELoss()
#     linear_t = torch.load("./linear_parameter.pth", weights_only=False)
#     linear_t.eval()
#     Y_pre = linear_t(data_t)
#     loss_t = loss_func(Y_pre, label_t)
#     toc = time.time()
#     print("time used:{};and the loss_test={}".format((toc - tic), loss_t))


# test()