#include"curve.h"
// Fit a polynomial curve to data using gradient descent (GD)
/*
 * Fit a polynomial of degree t to the given data by batch gradient descent.
 *
 * Minimizes the mean squared error
 *     J(theta) = (1/n) * sum_i (y_pred_i - y_i)^2
 * where  y_pred = params[0] + params[1]*x + ... + params[t]*x^t.
 * Each epoch accumulates the full-batch gradient first, then steps every
 * coefficient:  params[j] -= learning_rate * dJ/d(params[j]).
 *
 * Parameters:
 *   data     - array of (x, y) sample points (must not be NULL)
 *   num_data - number of points in data (must be > 0)
 *   t        - model degree: 1 = linear, 2 = quadratic, 3 = cubic
 *
 * Returns:
 *   FunctionParams with params[0..t] set to the fitted coefficients and
 *   num_params = t + 1.  For an unsupported degree, NULL data, or an empty
 *   data set, returns an all-zero result (num_params == 0) — this matches
 *   the original behavior for bad t, and additionally avoids dividing the
 *   gradient by num_data == 0 (which silently produced NaN coefficients).
 */
FunctionParams gradientDescentFit(DataPoint *data, int num_data, int t) {
	FunctionParams result = {0};   /* all coefficients start at zero */

    if (t < 1 || t > 3 || data == NULL || num_data <= 0) {
        return result;
    }

    /* Hand-tuned hyperparameters per degree (identical to the original
       values): higher degrees need a smaller step and more iterations. */
    static const double learning_rates[] = { 0.0001, 0.00003, 0.0000045 };
    static const int    epoch_counts[]   = { 1000000, 1000000, 10000000 };
    const double learning_rate = learning_rates[t - 1];
    const int    epochs        = epoch_counts[t - 1];

    result.num_params = t + 1;     /* a degree-t polynomial has t+1 coefficients */

    for (int iter = 0; iter < epochs; iter++) {
        double d_params[4] = {0};  /* dJ/d(params[j]) accumulators, j = 0..t */

        /* Accumulate the full-batch gradient over all data points. */
        for (int i = 0; i < num_data; i++) {
            /* Evaluate y_pred = sum_j params[j] * x^j, recording each
               power of x for reuse in the gradient below. */
            double powers[4];
            double xpow = 1.0;     /* x^j, built incrementally */
            double y_pred = 0.0;
            for (int j = 0; j <= t; j++) {
                powers[j] = xpow;
                y_pred += result.params[j] * xpow;
                xpow *= data[i].x;
            }

            /* d/d(params[j]) of (y_pred - y)^2 is 2*(y_pred - y)*x^j. */
            const double err = y_pred - data[i].y;
            for (int j = 0; j <= t; j++) {
                d_params[j] += 2.0 * err * powers[j];
            }
        }

        /* Step each coefficient along the averaged negative gradient. */
        for (int j = 0; j <= t; j++) {
            result.params[j] -= learning_rate * d_params[j] / num_data;
        }
    }

    return result;               /* fitted coefficients */
}