%% Newton-type method with BFGS update
% clear all;
close all;
clc;

% --- Iteration / line-search parameters ---
n_it = 60;        % maximum number of SQP iterations
gamma = 0.3;      % Armijo sufficient-decrease factor
beta = 0.6;       % backtracking step-size reduction factor
sigma_min = 0;    % lower bound on the merit-function penalty parameter
tol = 1E-4;       % convergence tolerance

% --- Physical model parameters ---
Restlength = 0.8; % spring rest length
D = 600;          % spring constant
m = 20;           % mass
g = 9.81;         % gravitational acceleration

% Initial value: "tensor" is an N-by-N-by-3 array holding the X, Y and Z
% coordinates of every point in the cloth grid.
N = 8;
tensor = zeros(N, N, 3);
% Evenly spaced grid: X varies along columns (shifted left by N/2),
% Y varies along rows.
[tensor(:,:,1), tensor(:,:,2)] = meshgrid(linspace(-N/2, N/2, N) - N/2, ...
                                          linspace(-N/2, N/2, N));

% Every point starts at height Z = 3
tensor(:,:,3) = 3;

% Flatten the grid into the initial optimization vector x0
x = tensor2array(tensor);

figure()
% Iteration header. Columns match the per-iteration fprintf inside the
% loop: iteration count, Lagrangian-gradient norm, equality-constraint
% norm, step norm, line-search step length, largest eigenvalue of B.
fprintf('It.\t\t||∇L||\t||g||\t||Δx||\tt\tmax eig(B)\n');

% Linearize objective, equality and inequality constraints at x0
% (value and finite-difference Jacobian).
[F_val, J]  = finite_difference(x, @cloth,                  Restlength, D, m, g);
[g_val, Jg] = finite_difference(x, @cloth_eqconstr2,        Restlength, D, m, g);
[h_val, Jh] = finite_difference(x, @cloth_ineqconstr_globe, Restlength, D, m, g);

% Logs for post-processing
y = x;          % iterates, one column per iteration
z = norm(J);    % Lagrangian-gradient norm per iteration
a = 1;          % largest eigenvalue of B per iteration

% B0, lambda0 and initial Lagrangian gradient.
% (The unused Aineq/Bineq bound matrices were removed: the QP below is fed
% the linearized inequality constraints Jh / h_val instead.)
B = 10000 * eye(length(x));   % initial Hessian approximation: scaled identity
lambda = 0;
lag_grad = J';
% This is needed to make the QP solver silent
opt = optimset('Display', 'off', 'LargeScale', 'off');
for k = 1 : n_it
    % Saving the lambda and x from the previous iterate.
    lambda_old=lambda;
    x_old=x;
    % Solve the local QP subproblem
    %   min  0.5*dx'*B*dx + J*dx
    %   s.t. Jg*dx = -g_val,  Jh*dx <= -h_val
    % to obtain the search direction "dx" and dual variables "lambda".
    % (B+B')/2 symmetrizes B against round-off asymmetry.
    [dx fval flag output lambda] = quadprog((B+B')/2, J', Jh, -h_val, Jg, -g_val, [], [], [], opt);
    mu = lambda.ineqlin;
    lambda = -lambda.eqlin;  % sign flip for quadprog's multiplier convention
    % Penalty parameter sigma of the L1 merit function
    % T(x) = F(x) + sigma*||g(x)||_1 ; it must dominate the multipliers.
    sigma = max(sigma_min,norm(lambda, inf));
    dlambda=lambda-lambda_old;
    % Evaluate merit function in the actual iterate
    T_act = F_val + sigma * norm(g_val, 1);
    % Directional derivative of the merit function along dx
    T_dir = J * dx - sigma * norm(g_val, 1);
    % Default value for line-search
    t = 1.0;
    % Preparing the first candidate
    x_new = x + t * dx;
    % NOTE(review): lambda_new is never used below and is not refreshed
    % inside the backtracking loop -- looks like dead code; verify.
    lambda_new = lambda_old + t*dlambda;
    F_new = cloth(x_new,Restlength,D,m,g);
    g_new = cloth_eqconstr2(x_new,Restlength,D,m,g);
    % Merit function at the candidate point
    T_new = F_new + sigma * norm(g_new, 1);
    % Backtracking line search: shrink t until Armijo's sufficient-decrease
    % condition on the merit function holds.
    while (T_new > T_act + gamma * t * T_dir)
        t = beta * t;
        x_new = x + t * dx;
        F_new = cloth(x_new,Restlength,D,m,g);
        g_new = cloth_eqconstr2(x_new,Restlength,D,m,g);
        T_new = F_new + sigma * norm(g_new, 1) ;
    end
    % By now the candidate is accepted by Armijo's condition.
    % Gradient of the Lagrangian at the OLD point, evaluated with the NEW
    % multipliers (both ends of the BFGS difference use the same lambda/mu).
    lag_grad = J'-Jg'*lambda+Jh'*mu;
    % Re-linearize objective and constraints at the accepted point
    [F_val J] = finite_difference(x_new, @cloth,Restlength,D,m,g);
    [g_val Jg ] = finite_difference(x_new, @cloth_eqconstr2,Restlength,D,m,g);
    [h_val Jh ] = finite_difference(x_new, @cloth_ineqconstr_globe,Restlength,D,m,g);
    % Gradient of the Lagrangian at the NEW point
    lag_grad_new = J'-Jg'*lambda+Jh'*mu;
    % BFGS update
    sk=x_new-x;                % step in x
    yk=lag_grad_new-lag_grad;  % change in Lagrangian gradient
    % Powell's damping ("Powell's trick"): modify yk when the curvature
    % condition yk'*sk > 0.2*sk'*B*sk is violated, so B stays pos. definite.
    % NOTE(review): the standard damped update is yk = theta*yk +
    % (1-theta)*B*sk; the combination below uses (1-theta)*yk + theta*B*sk,
    % i.e. theta and (1-theta) appear swapped -- verify against a reference.
    if (yk' * sk < 0.2 * sk' * B * sk)
        theta = 0.8 * sk' * B * sk / (sk' * B * sk - sk' * yk);
    else
        theta = 0;
    end
    yk = yk + theta * (B * sk - yk);
    % New Hessian approximate (rank-2 BFGS update)
    B= B - B * sk * sk' * B / (sk' * B * sk) + yk * yk' / (sk' * yk);
    x=x_new;
    % Print iteration summary: k, ||lag_grad||, ||g||, ||dx||, step length t,
    % largest eigenvalue of B
    fprintf('%d\t %f\t %f\t %f\t %f\t %f\n', k, norm(lag_grad), norm(g_val), norm(dx), t, max(eigs(B)))
    % Stopping criterion: small Lagrangian gradient and feasibility
    if norm(lag_grad) < tol*10^1 && norm(g_val) < tol
        fprintf('Convergence achieved\n')
        break
    end
    % Plot intermediate results every 4th iteration
    if(rem(k-1,4)==0)
        plotcloth_globe(x,Restlength);
    end
    drawnow;
    % Log iterate, Lagrangian-gradient norm and largest Hessian eigenvalue
    y=[y x_new];
    z=[z norm(lag_grad)];
    a=[a max(eigs(B))];
    
end
%% Plot results
% Squared Euclidean distance of every stored iterate to the final iterate
nCols = size(y, 2);
diffToFinal = y - repmat(y(:, end), 1, nCols);
xnewminx = sum(diffToFinal .^ 2, 1);

figure;
semilogy(xnewminx);
xlabel('Iteration step');
ylabel('||xk - x||');

% Ratio of successive distances: empirical convergence rate
convRate = xnewminx(1, 2:end) ./ xnewminx(1, 1:end-1);
figure;
plot(convRate);
xlabel('Iteration step');

% Iterate norms, gradient norms and Hessian eigenvalues over the run
figure;
subplot(3,1,1);
plot(sum(y .^ 2, 1));
xlabel('Objective function');
subplot(3,1,2);
plot(z);
xlabel('Norm of the Lagrange gradient');
subplot(3,1,3);
plot(a);
xlabel('Largest eigenvalue of Hessian');

% figure;
% tensie = array2tensor(x);
% texture = imread('clothTexture.jpg');
% h = surface(tensie(:,:,1), tensie(:,:,2), tensie(:,:,3));
% set(h,'CData',texture,'FaceColor','texturemap','EdgeColor', 'none');
% colormap('default');