# # from calendar import c
# # import torch 
# # import torch.nn as nn

# # #继承 nn.Module
# # class CustomMSELoss(nn.Module):
# #     """
# #     自定义的均方误差 (MSE) 损失函数。
# #     功能上等同于 nn.MSELoss()。
# #     """
# #     def __init__(self):
# #         # 调用父类（nn.Module）的构造函数
# #         super(CustomMSELoss,self).__init__()

# #     def forward(self,input,target):
# #         """
# #         计算 MSE 损失。
# #         :param input: 模型的预测值 (通常是 FloatTensor)
# #         :param target: 真实的标签值 (通常是 FloatTensor)
# #         :return: 损失的标量值
# #         """
# #         # 1.计算误差：（预测值 - 真实值）
# #         error = input - target

# #         #2. 计算误差的平方
# #         squared_error = torch.pow(error,2)

# #         # 3.求均值（Mean）
# #         mse_loss = torch.mean(squared_error)

# #         #返回损失值
# #         return mse_loss

# # # 验证自定义损失函数
# # # 假设预测值和真实值

# # pred = torch.tensor([1.0,2.0,3.0])
# # true = torch.tensor([1.5,2.5,3.5])

# # # 使用官方 MSELoss
# # official_loss = nn.MSELoss()
# # loss_official = official_loss(pred,true)

# # # 使用自定义 MSELoss
# # custom_loss = CustomMSELoss()
# # loss_custom = custom_loss(pred,true)

# # print(f"官方的MSE损失:{loss_official.item():.4f}")
# # print(f"自定义的MSE损失:{loss_custom.item():.4f}")


"""
This script defines CustomMSELoss — a hand-written equivalent of
nn.MSELoss() — and trains a small fully-connected network to fit
y = sin(x). Note: the training section below currently uses the
official nn.MSELoss(); swap the `criterion` assignment to
CustomMSELoss() to exercise the custom loss instead.
"""
import torch 
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import math

#from pytorch_study.pytorch_训练模板.train_predict import criterion
# ----------------------------------------
# ★ 自定义的损失函数 (核心部分)
# ----------------------------------------

class CustomMSELoss(nn.Module):
    """Mean squared error loss.

    Functionally equivalent to ``nn.MSELoss()`` with the default
    ``reduction='mean'``: returns the mean of element-wise squared
    differences between prediction and target.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        """Return mean((input - target)^2) as a scalar tensor."""
        diff = input - target
        return (diff * diff).mean()

# ========== 1. 生成示例数据 ==========
# y = 2x + 1 + 一点噪声
torch.manual_seed(0)#只在cpu上固定随机种子
x = torch.unsqueeze(torch.linspace(-5,5,1000),dim=1)
y = torch.sin(x)
"""
torch.randn 函数是 PyTorch 中一个非常常用的函数，
用于创建一个张量（Tensor），其中的元素是服从**标准正态分布
（Standard Normal Distribution，即均值为 0，方差为 1）**的随机数。
"""

# ========== 2. 定义模型 ==========
# class LinearModel(nn.Module):
#     def __init__(self):
#         super(LinearModel,self).__init__()
#         self.fc1 = nn.Linear(1,32)
#         self.fc2 = nn.Linear(32,32)
#         self.fc3 = nn.Linear(32,32)
#         self.fc4 = nn.Linear(32,16)
#         self.fc5 = nn.Linear(16,16)
#         self.fc6 = nn.Linear(16,1)
    
#     def forward(self,x):
#         x = torch.tanh(self.fc1(x))
#         x = torch.tanh(self.fc2(x))
#         x = torch.tanh(self.fc3(x))
#         x = torch.tanh(self.fc4(x))
#         x = torch.tanh(self.fc5(x))
#         x = torch.tanh(self.fc6(x))
#         return x
# model = LinearModel()


class LinearModel(nn.Module):
    """Fully-connected regression network.

    Layer widths: 1 -> 32 -> 32 -> 64 -> 32 -> 16 -> 16 -> 1.
    Every hidden layer is followed by ReLU; the final layer is left
    linear, as is conventional for regression outputs.
    """

    def __init__(self):
        super().__init__()
        # Layers are registered in the exact order they are applied in
        # forward(), which also fixes the parameter-initialization order.
        self.fc1 = nn.Linear(1, 32)        # input layer
        self.fc2 = nn.Linear(32, 32)       # hidden layer
        self.fc_extra_1 = nn.Linear(32, 64)  # widen to 64 features
        self.fc_extra_2 = nn.Linear(64, 32)  # contract back to 32
        self.fc3 = nn.Linear(32, 16)       # hidden layer
        self.fc4 = nn.Linear(16, 16)       # hidden layer
        self.fc5 = nn.Linear(16, 1)        # output layer (no activation)

    def forward(self, x):
        # ReLU-activated hidden stack, applied in registration order.
        hidden_layers = (
            self.fc1,
            self.fc2,
            self.fc_extra_1,
            self.fc_extra_2,
            self.fc3,
            self.fc4,
        )
        for layer in hidden_layers:
            x = torch.relu(layer(x))
        # Linear output head for regression.
        return self.fc5(x)
model = LinearModel()
# ========== 3. Loss function and optimizer ==========
# Swap the `criterion` assignment below to CustomMSELoss() to use the
# hand-written loss; both compute the same mean-squared-error value.
# criterion = CustomMSELoss()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)


# ========== 4. Train the model ==========
epochs = 200
for epoch in range(epochs):
    # Forward pass: full-batch predictions and loss.
    outputs = model(x)
    loss = criterion(outputs, y)

    # Backward pass: clear stale gradients, backpropagate, update weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Periodic progress report (every 20 epochs).
    # Fix: the message previously said "Custom Loss" although the
    # criterion in use is the official nn.MSELoss().
    if (epoch + 1) % 20 == 0:
        print(f"Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}")


# ========== 5. Visualize the fit ==========
# Run inference under no_grad: no autograd graph is needed for plotting.
with torch.no_grad():
    predicted = model(x)
plt.scatter(x.numpy(), y.numpy(), label='True Data')
# Fix: labels previously said "Linear Regression" / "Custom MSE Loss",
# but the data is y = sin(x) and the criterion is the official nn.MSELoss.
plt.plot(x.numpy(), predicted.numpy(), color='r', label='Fitted Curve')
plt.title('Sine Regression with MSE Loss')
plt.legend()
plt.show()


