import numpy as np

# def mse_cost_function(X, y):
#     """
#     Compute the mean-squared-error cost and gradient for logistic regression.

#     Parameters:
#     X -- input feature matrix of shape (m, n)
#     y -- label vector of shape (m,)
#     theta -- parameter vector of shape (n,)

#     Returns:
#     cost -- mean squared error cost
#     grad -- gradient with respect to theta, shape (n,)
#     """
#     m = len(y)  # number of samples

#     # compute predictions
#     h = linear_core_function(X, [0, 0], 10)

#     # mean squared error cost
#     cost = (1 / (2 * m)) * np.sum((h - y) ** 2)

#     # gradient
#     grad = (1 / m) * np.dot(X.T, (h - y) * h * (1 - h))

#     return cost, grad


def mse_cost_function(params, data, core_function):
    """Compute the total absolute-error cost of a linear fit to 2-D points.

    NOTE(review): despite the name, this returns the SUM of absolute
    residuals (an L1 / MAE-style cost), not a mean squared error.

    Parameters:
    params -- (k, b) pair: slope and intercept of the line y = k*x + b
    data -- array of shape (m, 2); column 0 holds x values, column 1 holds y values
    core_function -- unused; accepted only for interface compatibility
                     with callers that pass a model function

    Returns:
    cost -- float, sum over all points of |y_i - (k*x_i + b)|
    """
    k, b = params
    # The previous version called the undefined `linear_core_function` here
    # (a guaranteed NameError) and discarded the result; that dead call is removed.
    cost = np.sum(np.abs(data[:, 1] - (k * data[:, 0] + b)))
    return cost


# # Example usage
# # initialize parameters
# theta = np.zeros(X.shape[1])

# # compute cost and gradient
# cost, grad = mse_cost_function(X, y, theta)
# print("Cost:", cost)
# print("Gradient:", grad)
