function [X, L, X_err, L_err, iter, flag] = ...
    sorpower(A, Xo, X_ref, L_ref, maxit, tol, print, w)
%
%  [X,L,X_err,L_err,iter,flag] = sorpower(A, Xo, X_ref, L_ref, maxit, tol, print, w)
%
%  SORPOWER applies the SOR-accelerated power method to the eigenvalue 
%  problem
%     
%    (A - L*I)*X = 0
%
%  The unaccelerated iterates take the form
%    X[0]   = normalized guess, i.e. ||X[0]||_2 = 1.0
%    Z[l+1] = A * X[l]                                   (1)
%    L[l+1] = ||Z[l+1]||_2                               (2)
%    X[l+1] = Z[l+1] / L[l+1]                            (3)
%
%  Acceleration replaces (3) by
%    X[l+1] = X[l] + w[l] * ( Z[l+1] / L[l+1] - X[l] )
%
%  A w[l] of 1 yields no acceleration.  A w[l] > 1 (< 1) leads to
%  over(under)relaxation.  Note, 0 <= w <= 2.
%
%  An optimum *single* value of w to be applied can be shown to be
%    w_opt = 2 / (2 - L_1 - L_N)
%  where L_1 and L_N are the largest and smallest eigenvalues algebraically.  
%  This effectively modifies the eigenvalues of A such that |L_1'| = |L_N'|
%  while the eigenvectors are unchanged.
%  
%  For example, for a problem with L_1 = 0.95 and L_N = 0.0, w_opt = 1.905,
%  and L_1' = -L_N' = (1 - w + w*L_1) = 0.905.
%
%  Unfortunately, for our problem, L_1 and L_N are nearly equal in magnitude
%  to begin with.  For the example data, L_1 = 0.99999992 while
%  L_N = -0.99285642.
%
%  Reference:
%
%    E. Z. Mueller
%    Laban-PEL documentation
%
%  Inputs:
%
%    real       A(N,N)     -- N-by-N matrix
%    real       Xo(N)      -- Initial guess N-vector.
%    real       X_ref(N)   -- Reference eigenvector for error checking.
%    real       L_ref      -- Reference eigenvalue for error checking.
%    integer    maxit      -- The maximum number of iterations.
%    real       tol        -- Convergence tolerance on both X_err and L_err.
%    integer    print      -- 1 enables printing (optional, default 1).
%    real       w          -- Relaxation factor, 0 <= w <= 2 (optional,
%                             default 1.0, i.e. no acceleration).
%
%  Outputs:
%
%    real       X(N)       -- The computed eigenvector.
%    real       L          -- The computed eigenvalue, L = ||A*X||_2.
%    real       X_err      -- ||X - (+/-)X_ref||_2 at the final iterate.
%    real       L_err      -- |L - L_ref| at the final iterate.
%    integer    iter       -- The number of iterations performed.
%    integer    flag       -- 0=converged, 1=maxit reached, -1=breakdown
%                             (A*X vanished or became non-finite).
%

% Default optional arguments.
if (nargin < 7)
  print = 1;
end
if (nargin < 8)
    w = 1.0;
end
flag    = 1;
X_err   = 0;
L_err   = 0;

% Normalize the initial guess.
X  = Xo / norm(Xo, 2);

for iter = 1:maxit
    Z       = A * X; 
    L       = norm(Z, 2);
    % Breakdown guard: if the iterate was annihilated (or blew up), the
    % update below would divide by zero -- bail out with flag = -1.
    if (L == 0 || ~isfinite(L))
        flag = -1;
        break;
    end
    X       = X + w * (Z / L - X);
    % An eigenvector is determined only up to sign, so align X_ref with X
    % before differencing.  The dot-product sign is robust even when the
    % first components vanish or disagree (sign(X(1)/X_ref(1)) is not).
    s = sign(X' * X_ref);
    if (s == 0)
        s = 1;
    end
    X_err = norm(X - s*X_ref, 2);
    L_err = abs(L - L_ref);
    if ( X_err <= tol && L_err <= tol)
        flag = 0;
        break;
    end
    if ( mod(iter, 300) == 0 )
        printout(iter,X_err,L,L_err,print);
    end
end

if (flag==0 && print == 1)
    disp(' *** PI+SOR: Final Results *** ');
    % Show the converged values under the banner.
    printout(iter,X_err,L,L_err,print);
end
if (flag==1 && print==1)
    disp(' *** Warning: PI+SOR did not converge ***');
end

end
% end function sorpower

function printout(iter,X_err,L,L_err,print)
%  PRINTOUT displays one line of iteration diagnostics when print == 1.
%
%  Inputs:
%
%    integer    iter   -- current iteration count
%    real       X_err  -- eigenvector error norm
%    real       L      -- current eigenvalue estimate
%    real       L_err  -- eigenvalue error
%    integer    print  -- 1 enables output; any other value suppresses it
%
if (print==1)
    % fprintf is the idiomatic replacement for disp(sprintf(...)),
    % which the MATLAB Code Analyzer flags.
    fprintf(...
        ' Iter = %5i, X_err = %6.5e, L = %6.4e, L_err = %6.3e\n', ...
        iter, X_err,L,L_err);
end
end
% end function printout
