yalmip('clear')
clc
echo on

%*********************************************************
%
% Sum-of-squares decomposition
%
%*********************************************************
%
% This example shows how to solve some simple SOS-problems.
pause
clc

% Define scalar decision variables
x = sdpvar(1,1);
y = sdpvar(1,1);
z = sdpvar(1,1);
pause

% Define a polynomial to be analyzed
p = 12+y^2-2*x^3*y+2*y*z^2+x^6-2*x^3*z^2+z^4+x^2*y^2;
pause

% ...and the corresponding SOS constraint (p should be a sum of squares)
F = set(sos(p));
pause

% Call solvesos to calculate the SOS-decomposition.
solvesos(F);
pause

clc
% Extract the decomposition h, where p = h'*h
% (a lot of spurious terms so the display is messy...)
h = sosd(F);
sdisplay(h)
pause

% Cleaned display (terms with coefficients below 1e-4 removed)...

sdisplay(clean(h,1e-4))
pause


% To obtain more sparse solutions, it can sometimes be 
% beneficial to minimize the trace of the Gramian Q
% in the decomposition p(x) = v(x)'Qv(x)=h'(x)h(x)
%
% This can be done by specifying sos.traceobj=1
pause

solvesos(F,[],sdpsettings('sos.traceobj',1));
h = sosd(F);
sdisplay(h) % Still a lot of small terms...
pause

sdisplay(clean(h,1e-4)) % cleaned...
pause


% To clean the decomposition from small monomials already in solvesos,
% use the option 'sos.clean'. This will remove terms in chol(Q)v(x) with 
% coefficients smaller than sos.clean.
%
% NOTE, this means that the match between p and h'h will be detoriate, since we
% clean h after the SOS computations are done. This should only be used if
% you only want to display the polynomial. 
pause

solvesos(F,[],sdpsettings('sos.traceobj',1,'sos.clean',1e-4));
h = sosd(F);
sdisplay(h)
pause

% Is it really a SOS-decomposition
% If so, p-h'*h=0
pause
sdisplay(p-h'*h)
pause
clc

% What! Failure!? No, numerical issues.
% Remove all terms smaller than 1e-6
clean(p-h'*h,1e-6)
pause

% The largest coefficient in the polynomial
% p-h'*h is displayed in checkset as the 
% primal residual for a SOS constraint
checkset(F)

pause
clc

clc
% Let us take a deeper look at the decomposition 
pause
[sol,v,Q,res] = solvesos(F,[],sdpsettings('sos.traceobj',1));
pause

% Gramian (block diagonal due to the feature sos.congruence)
Q{1}
pause

% Should be positive definite (may depend on your solver due to numerical precision)
eig(Q{1})
pause

% Monomials used in the decomposition
sdisplay(v{1})
pause

% ...numerical mismatch 
sdisplay(p-v{1}'*Q{1}*v{1});
pause

% Largest coefficient (in absolute value) in the error polynomial p-v'Qv
mismatch = max(abs(getbase(p-v{1}'*Q{1}*v{1})))
pause

% Should be the same as reported in checkset.
% (May be different if Q is not positive semidefinite)
checkset(F)
pause

% The numerical value can easily be extracted also
% from CHECKSET.
mismatch = checkset(F)
pause

% Also given as 4th output from solvesos
res
pause

clc
% By studying the diagonal of the Gramian, we see that
% a lot of monomials are not used in the decomposition
% (zero diagonal means that the corresponding row and
% column is zero, hence the corresponding monomial is 
% only multiplied by 0)
diag(Q{1})
pause

% Let us re-solve the problem, but this time we manually
% specify what monomials to use.
% Since we know that the monomials generated by YALMIP
% are guaranteed to be sufficient, but Q indicates that
% some actually are redundant, let us re-use the old ones,
% but skip those corresponding to small diagonals.
%
% User-specified monomials are the fifth input.
usethese = find(diag(Q{1})>1e-3);
pause
[sol,v,Q] = solvesos(F,[],sdpsettings('sos.traceobj',1),[],v{1}(usethese));
pause

% The net result is a smaller decomposition 
sdisplay(sosd(F))
pause

% The problem is better conditioned and leads to smaller residuals
checkset(F)
pause

% The Gramian is of course smaller now.
% No zero diagonal, hence we have no simple way to
% obtain a smaller decomposition.
Q{1}
pause

% However, rather annoyingly there are a lot of terms
% in Q that are almost 0, but not quite (once again, this
% depends on what solver you have, how successful YALMIP's
% post-processing is etc.)
%
% It is possible to tell YALMIP to analyse the sparsity
% of the Gramian after it has computed it, and re-solve
% the problem, but this time forcing those elements to be 0.
%
% Note, we are not cleaning the Gramian a-posteriori, but
% re-solving the problem. Hence, the decomposition is correct.
%
% This can be obtained with the option sos.impsparse
pause
[sol,v,Q] = solvesos(F,[],sdpsettings('sos.traceobj',1,'sos.impsparse',1),[],v{1});
pause

% Sweet...
Q{1}
pause

clc
% Alternatively, we can tell YALMIP to study the almost-zero pattern
% of the Gramians, and derive a block-diagonalization based on this.
% By setting the option sos.numblkdg to a number larger than zero,
% YALMIP will declare numbers smaller than this tolerance as zero,
% detect any hidden block-structure, re-solve the problem with this
% more economic parameterization, and repeat until the block-structure
% no longer changes. Note that this not only gives "nicer" solutions,
% but also improves the numerical conditioning of the problem.
%
% This is the preferred way to post-process the solution. 
pause
[sol,v,Q] = solvesos(F,[],sdpsettings('sos.traceobj',1,'sos.numblkdg',1e-5));
pause

% Sweet...
Q{1}
pause



clc
% As a second example, we solve a somewhat larger problem.
%
% We want to show that the following matrix is co-positive
J = [1 -1  1  1 -1;
    -1  1 -1  1  1;
     1 -1  1 -1  1;
     1  1 -1  1 -1;
    -1  1  1 -1  1];
pause

% It is clearly co-positive if this polynomial 
% is a sum-of-squares
x1 = sdpvar(1,1);x2 = sdpvar(1,1);x3 = sdpvar(1,1);x4 = sdpvar(1,1);x5 = sdpvar(1,1);
z  = [x1^2 x2^2 x3^2 x4^2 x5^2]';
p  = z'*J*z;
pause

solvesos(set(sos(p)))

% Hmm, failure...
%
% Note that the error message displayed can be somewhat misleading.
% Depending on the solver and the option sos.model, infeasible problems can
% be declared as unbounded, and vice versa. In most cases (using SeDuMi,
% PENSDP, SDPT3,... and kernel representation model) infeasibility is
% reported as unbounded and an unbounded objective is reported as
% infeasible (the reason is that YALMIP solves a problem related to the
% dual of the SOS problem when the kernel representation model is used.)
%
% Let's multiply the polynomial with the positive definite function
% sum(x_i^2). If this new polynomial is SOS, then so is the original.

p_new = p*(x1^2+x2^2+x3^2+x4^2+x5^2);

pause
F = set(sos(p_new));
[sol,v,Q] = solvesos(F);
checkset(F)
% We found a decomposition, hence p is SOS and J is co-positive
pause

% Just for fun, let us solve the problem again, this time
% removing some redundant terms
pause
usethese = find(diag(Q{1})>1e-3);
[sol,v,Q] = solvesos(F,[],[],[],v{1}(usethese));
pause

clc
% The basic idea in sum of squares readily 
% extends also to the matrix-valued case.
%
% In other words, find a decomposition of
% the symmetric polynomial matrix P(x), 
% hence proving global positive definiteness.
pause

% Define a symmetric polynomial matrix
sdpvar x1 x2
P = [1+x1^2 -x1+x2+x1^2;-x1+x2+x1^2 2*x1^2-2*x1*x2+x2^2];
pause

% Call SOLVESOS
[sol,v,Q] = solvesos(set(sos(P)));

% The basis is now matrix valued
sdisplay(v{1})
pause

% Check that the polynomials match
clean(v{1}'*Q{1}*v{1}-P,1e-8)
pause

clc
% Let us now solve a parameterized SOS-problem.
%
% A typical application is to find a lower bound
% on the global minimum.
%
% This is done by solving
%   
%            max t
% subject to p(x)-t is SOS
pause

% Define p and t 
x = sdpvar(1,1);
y = sdpvar(1,1);
z = sdpvar(1,1);

p = 12+y^2-2*x^3*y+2*y*z^2+x^6-2*x^3*z^2+z^4+x^2*y^2;
t = sdpvar(1,1)
pause

% maximize t subject to the SOS-constraint
%
% SOLVESOS will automatically categorize t as a 
% parametric variable since it is part of the objective
solvesos(set(sos(p-t)),-t);
pause

% Lower bound
double(t)
pause
clc


% Ok, now for some more advanced coding
%
% Given the nonlinear system dxdt=f(x),
% we will try to prove that z'Pz is a Lyapunov function
% where z = [x1;x2;x1^2;x1*x2;x2^2] and P positive definite
pause

% Define state-variables and system
x = sdpvar(2,1);
f = [-x(1)-x(1)^3+x(2);-x(2)];

pause

% Define z, P, Lyapunov function and derivatives
z = [x(1);x(2);x(1)^2;x(1)*x(2);x(2)^2];
P = sdpvar(length(z),length(z));
V = z'*P*z;
dVdx = jacobian(V,x);
dVdt = dVdx*f;
pause

% Try to prove that dVdt<0, while minimizing
% trace(P), subject to P>0
%
% All parametric variables (i.e. P) are constrained,
% hence SOLVESOS will find them automatically.
%
% Alternative
% solvesos(F,trace(P),[],P(:));
%
F = set(sos(-dVdt)) + set(P>eye(5));
[sol,v,Q] = solvesos(F,trace(P));
pause
clc

% Checking the validity of the SOS-decomposition is easily done
% (checks the SOS-decomposition at the current value of P)
checkset(F)
pause

% So, according to the theory, we should have -dVdt==v'Qv
clean(v{1}'*Q{1}*v{1}-(-dVdt),1e-2)
pause

% No! v{1}'*Q{1}*v{1} is a decomposition of -dVdt
% when P is *fixed* to the computed optimal value.
%
% v{1}'*Q{1}*v{1} is a polynomial in x only, while
% dVdt is a polynomial in x and P.
pause

% To check the decomposition manually, we need to
% define the polynomials with the computed value 
% of P
%
% (Of course, in practice it is most often more convenient
% to use CHECKSET where this is done automatically)
V = z'*double(P)*z;
dVdx = jacobian(V,x);
dVdt = dVdx*f;
clean(-dVdt-v{1}'*Q{1}*v{1},1e-10)
pause

clc
% Finally, make sure to check out the help in the HTML manual for more information.
pause

echo off


