import torch
import torch.nn as nn
import torch.optim as optim

# Simulated data: annual income (single feature) and credit score (target).
# Float literals are required — nn.Linear raises on integer (Long) tensors —
# and the target is shaped (N, 1) to match the model's (N, 1) output, which
# avoids MSELoss silently broadcasting (N,) vs (N, 1) into an (N, N) loss.
X = torch.tensor([[50000.], [80000.], [30000.], [70000.], [60000.], [45000.], [90000.], [20000.], [72000.], [55000.]])
y = torch.tensor([[700.], [720.], [650.], [760.], [680.], [620.], [780.], [580.], [740.], [660.]])

# 定义模型
class LinearModel(nn.Module):
    """Single-feature linear regressor: y = w * x + b via one nn.Linear layer."""

    def __init__(self):
        super().__init__()
        # One input feature in, one prediction out — a plain affine map.
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        prediction = self.linear(x)
        return prediction

# Instantiate the model
model = LinearModel()

# Defensive casts: nn.Linear cannot consume integer (Long) tensors, and
# MSELoss needs the target shaped (N, 1) to match the model output —
# otherwise (N,) vs (N, 1) broadcasting yields an (N, N) loss surface.
# reshape(-1, 1) is a no-op if y is already a column vector.
X_train = X.float()
y_train = y.float().reshape(-1, 1)

# Standardize the feature: raw incomes are ~1e4-1e5, so with lr=0.01 the
# gradients (which scale with x**2) explode and the loss diverges to
# inf/NaN within a few epochs. Training on z-scores keeps SGD stable.
x_mean = X_train.mean()
x_std = X_train.std()
X_norm = (X_train - x_mean) / x_std

# Loss function and optimizer
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

# Train the model
num_epochs = 100  # number of training epochs
for epoch in range(num_epochs):
    # Forward pass
    outputs = model(X_norm)
    loss = criterion(outputs, y_train)

    # Backward pass and parameter update
    optimizer.zero_grad()  # clear stale gradients
    loss.backward()        # backpropagate
    optimizer.step()       # apply the update

    if (epoch + 1) % 10 == 0:  # report the loss every 10 epochs
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

# Evaluate the model
with torch.no_grad():  # no gradient tracking needed at inference time
    predicted_scores = model(X_norm)
    print(f'Predicted Credit Scores: {predicted_scores}')

# Report slope/intercept on the ORIGINAL (un-normalized) income scale.
# The model learned y = w * (x - mean) / std + b, which rearranges to
# slope = w / std and intercept = b - w * mean / std.
w = model.linear.weight.item()
b = model.linear.bias.item()
print(f'Slope (Coefficient): {w / x_std.item()}')
print(f'Intercept: {b - w * x_mean.item() / x_std.item()}')


# NOTE(review): vestigial entry-point guard — all the work above runs at
# import time, and this branch only emits a blank line when the file is
# executed as a script. Presumably left over from an earlier structure;
# consider moving the training code into a main() called from here.
if __name__ == '__main__':
    print()