#include "newton.h"
#include "nlp.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

// 计算带障碍项的目标函数值
// Evaluate the barrier/penalty augmented objective at x:
//   f(x) - mu * sum_i log(-g_i(x)) + mu * sum_j h_j(x)^2
// Inequality constraints g_i(x) < 0 are handled with a log barrier;
// equality constraints h_j(x) = 0 with a quadratic penalty.
// Returns INFINITY when any inequality constraint is violated (g_i >= 0).
double augmented_objective(NLPProblem *problem, double *x, double mu) {
    double value = problem->f(x);

    // Log-barrier contribution for the m inequality constraints.
    if (problem->m > 0) {
        double *g = allocate_vector(problem->m);
        problem->g(x, g);
        int infeasible = 0;
        for (int i = 0; i < problem->m && !infeasible; i++) {
            if (g[i] >= 0) {
                infeasible = 1;       // barrier undefined outside g < 0
            } else {
                value -= mu * log(-g[i]);
            }
        }
        free_vector(g);
        if (infeasible) {
            return INFINITY;
        }
    }

    // Quadratic-penalty contribution for the p equality constraints.
    if (problem->p > 0) {
        double *h = allocate_vector(problem->p);
        problem->h(x, h);
        for (int j = 0; j < problem->p; j++) {
            value += mu * h[j] * h[j];
        }
        free_vector(h);
    }

    return value;
}

// 计算梯度（使用解析梯度优先）
// Compute the gradient of the augmented (barrier + penalty) objective at x.
// The gradient of f is taken analytically when problem->grad_f is provided,
// otherwise by forward finite differences on f ALONE; the barrier and
// penalty contributions are then added analytically in both cases.
//
// BUG FIX: the previous version finite-differenced augmented_objective(),
// which already contains the barrier/penalty terms, and then added the
// analytic constraint contributions on top — double-counting them (and
// producing INFINITY differences near the feasible boundary). We now
// difference problem->f only, so the constraint terms appear exactly once.
void compute_gradient(NLPProblem *problem, double *x, double mu, double *grad) {
    memset(grad, 0, problem->n * sizeof(double));

    if (problem->grad_f) {
        // Analytic gradient of the raw objective f.
        problem->grad_f(x, grad);
    } else {
        // Forward finite differences on f only (O(h) accurate).
        double h = 1e-5;
        double f_x = problem->f(x);
        double *x_plus_h = allocate_vector(problem->n);

        for (int i = 0; i < problem->n; i++) {
            memcpy(x_plus_h, x, problem->n * sizeof(double));
            x_plus_h[i] += h;
            grad[i] = (problem->f(x_plus_h) - f_x) / h;
        }
        free_vector(x_plus_h);
    }

    // Barrier gradient: d/dx [-mu*log(-g_i)] = (-mu/g_i) * Jg_i, with g_i < 0.
    if (problem->m > 0) {
        double *g = allocate_vector(problem->m);
        double *jac_g = allocate_vector(problem->m * problem->n);

        problem->g(x, g);
        problem->jac_g(x, jac_g);

        for (int i = 0; i < problem->m; i++) {
            double coeff = mu / g[i];  // subtracted below, net effect -mu/g[i]
            for (int j = 0; j < problem->n; j++) {
                grad[j] -= coeff * jac_g[i * problem->n + j];
            }
        }

        free_vector(g);
        free_vector(jac_g);
    }

    // Penalty gradient: d/dx [mu*h_i^2] = 2*mu*h_i * Jh_i.
    if (problem->p > 0) {
        double *h = allocate_vector(problem->p);
        double *jac_h = allocate_vector(problem->p * problem->n);

        problem->h(x, h);
        problem->jac_h(x, jac_h);

        for (int i = 0; i < problem->p; i++) {
            double coeff = 2 * mu * h[i];
            for (int j = 0; j < problem->n; j++) {
                grad[j] += coeff * jac_h[i * problem->n + j];
            }
        }

        free_vector(h);
        free_vector(jac_h);
    }
}

// 计算Hessian矩阵
// Assemble an approximate Hessian of the augmented objective at x
// (row-major n x n, written into hess).
// Approximations, as in the original design:
//  - the objective's Hessian is replaced by the identity;
//  - constraint second-derivative terms are dropped (Gauss-Newton style),
//    keeping only the rank-one Jacobian outer products.
void compute_hessian(NLPProblem *problem, double *x, double mu, double *hess) {
    int n = problem->n;
    memset(hess, 0, n * n * sizeof(double));

    // Identity stand-in for the objective's Hessian.
    for (int d = 0; d < n; d++) {
        hess[d * n + d] += 1.0;
    }

    // Barrier contribution: sum_k (mu / g_k^2) * Jg_k Jg_k^T.
    if (problem->m > 0) {
        double *g = allocate_vector(problem->m);
        double *jac_g = allocate_vector(problem->m * problem->n);

        problem->g(x, g);
        problem->jac_g(x, jac_g);

        for (int k = 0; k < problem->m; k++) {
            double *row = &jac_g[k * n];
            double w = mu / (g[k] * g[k]);
            for (int i = 0; i < n; i++) {
                for (int j = 0; j < n; j++) {
                    // Second-derivative term of g_k intentionally omitted.
                    hess[i * n + j] += w * row[i] * row[j];
                }
            }
        }

        free_vector(g);
        free_vector(jac_g);
    }

    // Penalty contribution: sum_k 2*mu * Jh_k Jh_k^T.
    if (problem->p > 0) {
        double *h = allocate_vector(problem->p);
        double *jac_h = allocate_vector(problem->p * problem->n);

        problem->h(x, h);
        problem->jac_h(x, jac_h);

        for (int k = 0; k < problem->p; k++) {
            double *row = &jac_h[k * n];
            double w = 2 * mu;
            for (int i = 0; i < n; i++) {
                for (int j = 0; j < n; j++) {
                    // Second-derivative term of h_k intentionally omitted.
                    hess[i * n + j] += w * row[i] * row[j];
                }
            }
        }

        free_vector(h);
        free_vector(jac_h);
    }
}

// 解线性方程组（使用部分主元高斯消元法）
// Solve the dense n x n system A*x = b (row-major A) using Gaussian
// elimination with partial pivoting. A and b are not modified; the work
// is done on private copies. The caller owns the returned vector and
// must release it with free_vector().
// NOTE(review): a singular system (zero pivot) is not detected — the
// result then contains inf/NaN entries.
double *solve_linear_system(double *A, double *b, int n) {
    double *sol = allocate_vector(n);
    double *U = allocate_vector(n * n);   // working copy of A
    double *y = allocate_vector(n);       // working copy of b

    memcpy(U, A, n * n * sizeof(double));
    memcpy(y, b, n * sizeof(double));

    // Forward elimination with row pivoting.
    for (int k = 0; k < n - 1; k++) {
        // Pick the row with the largest |entry| in column k as pivot.
        int pivot = k;
        for (int r = k + 1; r < n; r++) {
            if (fabs(U[r * n + k]) > fabs(U[pivot * n + k])) {
                pivot = r;
            }
        }

        // Swap the pivot row into position k.
        if (pivot != k) {
            for (int c = k; c < n; c++) {
                double t = U[k * n + c];
                U[k * n + c] = U[pivot * n + c];
                U[pivot * n + c] = t;
            }
            double t = y[k];
            y[k] = y[pivot];
            y[pivot] = t;
        }

        // Eliminate column k below the diagonal.
        for (int r = k + 1; r < n; r++) {
            double m = U[r * n + k] / U[k * n + k];
            for (int c = k; c < n; c++) {
                U[r * n + c] -= m * U[k * n + c];
            }
            y[r] -= m * y[k];
        }
    }

    // Back substitution on the upper-triangular system.
    for (int i = n - 1; i >= 0; i--) {
        sol[i] = y[i];
        for (int j = i + 1; j < n; j++) {
            sol[i] -= U[i * n + j] * sol[j];
        }
        sol[i] /= U[i * n + i];
    }

    free_vector(U);
    free_vector(y);

    return sol;
}

// 向量范数计算
double vector_norm(double *vec, int n) {
    double sum = 0.0;
    for (int i = 0; i < n; i++) {
        sum += vec[i] * vec[i];
    }
    return sqrt(sum);
}

// 牛顿法主函数
// Newton iteration on the augmented objective.
// Starts from x0, iterates until ||grad|| <= tol or 100 iterations,
// then writes the final iterate back into x0.
// NOTE(review): takes a full unit step every iteration — no line search.
void newton_method(NLPProblem *problem, double *x0, double mu, double tol) {
    const int n = problem->n;
    const int max_iter = 100;

    double *x = allocate_vector(n);
    double *grad = allocate_vector(n);
    double *hess = allocate_vector(n * n);

    memcpy(x, x0, n * sizeof(double));

    double error = tol + 1;   // force at least one iteration

    for (int iter = 0; iter < max_iter && error > tol; iter++) {
        compute_gradient(problem, x, mu, grad);
        compute_hessian(problem, x, mu, hess);
        error = vector_norm(grad, n);   // convergence measure at current x

        // Newton system: H * step = -grad.
        double *rhs = allocate_vector(n);
        for (int i = 0; i < n; i++) {
            rhs[i] = -grad[i];
        }
        double *step = solve_linear_system(hess, rhs, n);
        free_vector(rhs);

        // Full step in the Newton direction.
        for (int i = 0; i < n; i++) {
            x[i] += step[i];
        }
        free_vector(step);

        printf("Iter %d: error = %g\n", iter, error);
    }

    // Return the final iterate through x0.
    memcpy(x0, x, n * sizeof(double));

    free_vector(x);
    free_vector(grad);
    free_vector(hess);
}