from __future__ import division, print_function

from typing import Any

from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.compatibility import (
    Callable, NotIterable, as_int, is_sequence)
from sympy.core.decorators import deprecated
from sympy.core.expr import Expr
from sympy.core.numbers import mod_inverse
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.symbol import Dummy, Symbol, _uniquely_named_symbol, symbols
from sympy.core.sympify import sympify
from sympy.functions import exp, factorial, log
from sympy.functions.elementary.miscellaneous import Max, Min, sqrt
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.polys import cancel
from sympy.printing import sstr
from sympy.simplify.simplify import (
    simplify as _simplify, dotprodsimp as _dotprodsimp)
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import flatten, numbered_symbols
from sympy.utilities.misc import filldedent

from .common import (
    MatrixCommon, MatrixError, NonSquareMatrixError, NonInvertibleMatrixError,
    ShapeError)

from .utilities import _iszero, _is_zero_after_expand_mul

from .determinant import (
    _find_reasonable_pivot, _find_reasonable_pivot_naive,
    _adjugate, _charpoly, _cofactor, _cofactor_matrix,
    _det, _det_bareiss, _det_berkowitz, _det_LU, _minor, _minor_submatrix)

from .reductions import _is_echelon, _echelon_form, _rank, _rref
from .subspaces import _columnspace, _nullspace, _rowspace, _orthogonalize

from .eigen import (
    _eigenvals, _eigenvects, _is_diagonalizable, _diagonalize,
    _eval_is_positive_definite,
    _is_positive_definite, _is_positive_semidefinite,
    _is_negative_definite, _is_negative_semidefinite, _is_indefinite,
    _jordan_form, _left_eigenvects, _singular_values)


class DeferredVector(Symbol, NotIterable):
    """A vector whose components are deferred (e.g. for use with lambdify)

    Examples
    ========

    >>> from sympy import DeferredVector, lambdify
    >>> X = DeferredVector('X')
    >>> X
    X
    >>> expr = (X[0] + 2, X[2] + 3)
    >>> func = lambdify(X, expr)
    >>> func([1, 2, 3])
    (3, 6)
    """

    def __getitem__(self, i):
        if i < 0:
            raise IndexError('DeferredVector index out of range')
        component_name = '%s[%d]' % (self.name, i)
        return Symbol(component_name)

    def __str__(self):
        return sstr(self)

    def __repr__(self):
        return "DeferredVector('%s')" % self.name


class MatrixDeterminant(MatrixCommon):
    """Provides basic matrix determinant operations. Should not be instantiated
    directly. See ``determinant.py`` for their implementations."""

    def _eval_det_bareiss(self, iszerofunc=_is_zero_after_expand_mul,
            dotprodsimp=None):
        return _det_bareiss(self, iszerofunc=iszerofunc,
                dotprodsimp=dotprodsimp)

    def _eval_det_berkowitz(self, dotprodsimp=None):
        return _det_berkowitz(self, dotprodsimp=dotprodsimp)

    def _eval_det_lu(self, iszerofunc=_iszero, simpfunc=None, dotprodsimp=None):
        return _det_LU(self, iszerofunc=iszerofunc, simpfunc=simpfunc,
                dotprodsimp=dotprodsimp)

    def _eval_determinant(self): # for expressions.determinant.Determinant
        return _det(self)

    def adjugate(self, method="berkowitz", dotprodsimp=None):
        return _adjugate(self, method=method, dotprodsimp=dotprodsimp)

    def charpoly(self, x='lambda', simplify=_simplify, dotprodsimp=None):
        return _charpoly(self, x=x, simplify=simplify, dotprodsimp=dotprodsimp)

    def cofactor(self, i, j, method="berkowitz", dotprodsimp=None):
        return _cofactor(self, i, j, method=method, dotprodsimp=dotprodsimp)

    def cofactor_matrix(self, method="berkowitz", dotprodsimp=None):
        return _cofactor_matrix(self, method=method, dotprodsimp=dotprodsimp)

    def det(self, method="bareiss", iszerofunc=None, dotprodsimp=None):
        return _det(self, method=method, iszerofunc=iszerofunc,
                dotprodsimp=dotprodsimp)

    def minor(self, i, j, method="berkowitz", dotprodsimp=None):
        return _minor(self, i, j, method=method, dotprodsimp=dotprodsimp)

    def minor_submatrix(self, i, j):
        return _minor_submatrix(self, i, j)

    _eval_det_bareiss.__doc__            = _det_bareiss.__doc__
    _eval_det_berkowitz.__doc__          = _det_berkowitz.__doc__
    _eval_det_lu.__doc__                 = _det_LU.__doc__
    _eval_determinant.__doc__            = _det.__doc__
    adjugate.__doc__                     = _adjugate.__doc__
    charpoly.__doc__                     = _charpoly.__doc__
    cofactor.__doc__                     = _cofactor.__doc__
    cofactor_matrix.__doc__              = _cofactor_matrix.__doc__
    det.__doc__                          = _det.__doc__
    minor.__doc__                        = _minor.__doc__
    minor_submatrix.__doc__              = _minor_submatrix.__doc__


class MatrixReductions(MatrixDeterminant):
    """Provides basic matrix row/column operations. Should not be instantiated
    directly. See ``reductions.py`` for some of their implementations."""

    def echelon_form(self, iszerofunc=_iszero, simplify=False, with_pivots=False,
            dotprodsimp=None):
        return _echelon_form(self, iszerofunc=iszerofunc, simplify=simplify,
                with_pivots=with_pivots, dotprodsimp=dotprodsimp)

    @property
    def is_echelon(self):
        return _is_echelon(self)

    def rank(self, iszerofunc=_iszero, simplify=False, dotprodsimp=None):
        return _rank(self, iszerofunc=iszerofunc, simplify=simplify,
                dotprodsimp=dotprodsimp)

    def rref(self, iszerofunc=_iszero, simplify=False, pivots=True,
            normalize_last=True, dotprodsimp=None):
        return _rref(self, iszerofunc=iszerofunc, simplify=simplify,
            pivots=pivots, normalize_last=normalize_last, dotprodsimp=dotprodsimp)

    echelon_form.__doc__ = _echelon_form.__doc__
    is_echelon.__doc__   = _is_echelon.__doc__
    rank.__doc__         = _rank.__doc__
    rref.__doc__         = _rref.__doc__

    def _normalize_op_args(self, op, col, k, col1, col2, error_str="col"):
        """Validate the arguments for a row/column operation.  ``error_str``
        can be one of "row" or "col" depending on the arguments being parsed."""
        if op not in ["n->kn", "n<->m", "n->n+km"]:
            raise ValueError("Unknown {0} operation '{1}'. Valid {0} operations "
                             "are 'n->kn', 'n<->m', 'n->n+km'".format(error_str, op))

        # define self_col according to error_str
        self_cols = self.cols if error_str == 'col' else self.rows

        # normalize and validate the arguments
        if op == "n->kn":
            col = col if col is not None else col1
            if col is None or k is None:
                raise ValueError("For a {0} operation 'n->kn' you must provide the "
                                 "kwargs `{0}` and `k`".format(error_str))
            if not 0 <= col < self_cols:
                raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col))

        elif op == "n<->m":
            # we need two cols to swap. It doesn't matter
            # how they were specified, so gather them together and
            # remove `None`
            cols = set((col, k, col1, col2)).difference([None])
            if len(cols) > 2:
                # maybe the user passed `k` unnecessarily; try ignoring it
                cols = set((col, col1, col2)).difference([None])
            if len(cols) != 2:
                raise ValueError("For a {0} operation 'n<->m' you must provide the "
                                 "kwargs `{0}1` and `{0}2`".format(error_str))
            col1, col2 = cols
            if not 0 <= col1 < self_cols:
                raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col1))
            if not 0 <= col2 < self_cols:
                raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col2))

        elif op == "n->n+km":
            col = col1 if col is None else col
            col2 = col1 if col2 is None else col2
            if col is None or col2 is None or k is None:
                raise ValueError("For a {0} operation 'n->n+km' you must provide the "
                                 "kwargs `{0}`, `k`, and `{0}2`".format(error_str))
            if col == col2:
                raise ValueError("For a {0} operation 'n->n+km' `{0}` and `{0}2` must "
                                 "be different.".format(error_str))
            if not 0 <= col < self_cols:
                raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col))
            if not 0 <= col2 < self_cols:
                raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col2))

        else:
            raise ValueError('invalid operation %s' % repr(op))

        return op, col, k, col1, col2

    def _eval_col_op_multiply_col_by_const(self, col, k):
        def entry(i, j):
            if j == col:
                return k * self[i, j]
            return self[i, j]
        return self._new(self.rows, self.cols, entry)

    def _eval_col_op_swap(self, col1, col2):
        def entry(i, j):
            if j == col1:
                return self[i, col2]
            elif j == col2:
                return self[i, col1]
            return self[i, j]
        return self._new(self.rows, self.cols, entry)

    def _eval_col_op_add_multiple_to_other_col(self, col, k, col2):
        def entry(i, j):
            if j == col:
                return self[i, j] + k * self[i, col2]
            return self[i, j]
        return self._new(self.rows, self.cols, entry)

    def _eval_row_op_swap(self, row1, row2):
        def entry(i, j):
            if i == row1:
                return self[row2, j]
            elif i == row2:
                return self[row1, j]
            return self[i, j]
        return self._new(self.rows, self.cols, entry)

    def _eval_row_op_multiply_row_by_const(self, row, k):
        def entry(i, j):
            if i == row:
                return k * self[i, j]
            return self[i, j]
        return self._new(self.rows, self.cols, entry)

    def _eval_row_op_add_multiple_to_other_row(self, row, k, row2):
        def entry(i, j):
            if i == row:
                return self[i, j] + k * self[row2, j]
            return self[i, j]
        return self._new(self.rows, self.cols, entry)

    def elementary_col_op(self, op="n->kn", col=None, k=None, col1=None, col2=None):
        """Performs the elementary column operation `op`.

        `op` may be one of

            * "n->kn" (column n goes to k*n)
            * "n<->m" (swap column n and column m)
            * "n->n+km" (column n goes to column n + k*column m)

        Parameters
        ==========

        op : string; the elementary row operation
        col : the column to apply the column operation
        k : the multiple to apply in the column operation
        col1 : one column of a column swap
        col2 : second column of a column swap or column "m" in the column operation
               "n->n+km"
        """

        op, col, k, col1, col2 = self._normalize_op_args(op, col, k, col1, col2, "col")

        # now that we've validated, we're all good to dispatch
        if op == "n->kn":
            return self._eval_col_op_multiply_col_by_const(col, k)
        if op == "n<->m":
            return self._eval_col_op_swap(col1, col2)
        if op == "n->n+km":
            return self._eval_col_op_add_multiple_to_other_col(col, k, col2)

    def elementary_row_op(self, op="n->kn", row=None, k=None, row1=None, row2=None):
        """Performs the elementary row operation `op`.

        `op` may be one of

            * "n->kn" (row n goes to k*n)
            * "n<->m" (swap row n and row m)
            * "n->n+km" (row n goes to row n + k*row m)

        Parameters
        ==========

        op : string; the elementary row operation
        row : the row to apply the row operation
        k : the multiple to apply in the row operation
        row1 : one row of a row swap
        row2 : second row of a row swap or row "m" in the row operation
               "n->n+km"
        """

        op, row, k, row1, row2 = self._normalize_op_args(op, row, k, row1, row2, "row")

        # now that we've validated, we're all good to dispatch
        if op == "n->kn":
            return self._eval_row_op_multiply_row_by_const(row, k)
        if op == "n<->m":
            return self._eval_row_op_swap(row1, row2)
        if op == "n->n+km":
            return self._eval_row_op_add_multiple_to_other_row(row, k, row2)


class MatrixSubspaces(MatrixReductions):
    """Provides methods relating to the fundamental subspaces of a matrix.
    Should not be instantiated directly. See ``subspaces.py`` for their
    implementations."""

    def columnspace(self, simplify=False, dotprodsimp=None):
        return _columnspace(self, simplify=simplify, dotprodsimp=dotprodsimp)

    def nullspace(self, simplify=False, iszerofunc=_iszero, dotprodsimp=None):
        return _nullspace(self, simplify=simplify, iszerofunc=iszerofunc,
                dotprodsimp=dotprodsimp)

    def rowspace(self, simplify=False, dotprodsimp=None):
        return _rowspace(self, simplify=simplify, dotprodsimp=dotprodsimp)

    # This is a classmethod but is converted to such later in order to allow
    # assignment of __doc__ since that does not work for already wrapped
    # classmethods in Python 3.6.
    def orthogonalize(cls, *vecs, **kwargs):
        return _orthogonalize(cls, *vecs, **kwargs)

    columnspace.__doc__   = _columnspace.__doc__
    nullspace.__doc__     = _nullspace.__doc__
    rowspace.__doc__      = _rowspace.__doc__
    orthogonalize.__doc__ = _orthogonalize.__doc__

    orthogonalize         = classmethod(orthogonalize)


class MatrixEigen(MatrixSubspaces):
    """Provides basic matrix eigenvalue/vector operations.
    Should not be instantiated directly. See ``eigen.py`` for their
    implementations."""

    def _eval_is_positive_definite(self, method="eigen", dotprodsimp=None):
        return _eval_is_positive_definite(self, method=method,
                dotprodsimp=dotprodsimp)

    def eigenvals(self, error_when_incomplete=True, dotprodsimp=None, **flags):
        return _eigenvals(self, error_when_incomplete=error_when_incomplete,
                dotprodsimp=dotprodsimp, **flags)

    def eigenvects(self, error_when_incomplete=True, iszerofunc=_iszero,
            dotprodsimp=None, **flags):
        return _eigenvects(self, error_when_incomplete=error_when_incomplete,
                iszerofunc=iszerofunc, dotprodsimp=dotprodsimp, **flags)

    def is_diagonalizable(self, reals_only=False, dotprodsimp=None, **kwargs):
        return _is_diagonalizable(self, reals_only=reals_only,
                dotprodsimp=dotprodsimp, **kwargs)

    def diagonalize(self, reals_only=False, sort=False, normalize=False,
            dotprodsimp=None):
        return _diagonalize(self, reals_only=reals_only, sort=sort,
                normalize=normalize, dotprodsimp=dotprodsimp)

    @property
    def is_positive_definite(self):
        return _is_positive_definite(self)

    @property
    def is_positive_semidefinite(self):
        return _is_positive_semidefinite(self)

    @property
    def is_negative_definite(self):
        return _is_negative_definite(self)

    @property
    def is_negative_semidefinite(self):
        return _is_negative_semidefinite(self)

    @property
    def is_indefinite(self):
        return _is_indefinite(self)

    def jordan_form(self, calc_transform=True, dotprodsimp=None, **kwargs):
        return _jordan_form(self, calc_transform=calc_transform,
                dotprodsimp=dotprodsimp, **kwargs)

    def left_eigenvects(self, **flags):
        return _left_eigenvects(self, **flags)

    def singular_values(self, dotprodsimp=None):
        return _singular_values(self, dotprodsimp=dotprodsimp)

    eigenvals.__doc__                  = _eigenvals.__doc__
    eigenvects.__doc__                 = _eigenvects.__doc__
    is_diagonalizable.__doc__          = _is_diagonalizable.__doc__
    diagonalize.__doc__                = _diagonalize.__doc__
    is_positive_definite.__doc__       = _is_positive_definite.__doc__
    is_positive_semidefinite.__doc__   = _is_positive_semidefinite.__doc__
    is_negative_definite.__doc__       = _is_negative_definite.__doc__
    is_negative_semidefinite.__doc__   = _is_negative_semidefinite.__doc__
    is_indefinite.__doc__              = _is_indefinite.__doc__
    jordan_form.__doc__                = _jordan_form.__doc__
    left_eigenvects.__doc__            = _left_eigenvects.__doc__
    singular_values.__doc__            = _singular_values.__doc__


class MatrixCalculus(MatrixCommon):
    """Provides calculus-related matrix operations."""

    def diff(self, *args, **kwargs):
        """Calculate the derivative of each element in the matrix.
        ``args`` will be passed to the ``integrate`` function.

        Examples
        ========

        >>> from sympy.matrices import Matrix
        >>> from sympy.abc import x, y
        >>> M = Matrix([[x, y], [1, 0]])
        >>> M.diff(x)
        Matrix([
        [1, 0],
        [0, 0]])

        See Also
        ========

        integrate
        limit
        """
        # XXX this should be handled here rather than in Derivative
        from sympy import Derivative
        kwargs.setdefault('evaluate', True)
        deriv = Derivative(self, *args, **kwargs)
        if not isinstance(self, Basic):
            return deriv.as_mutable()
        else:
            return deriv

    def _eval_derivative(self, arg):
        return self.applyfunc(lambda x: x.diff(arg))

    def _accept_eval_derivative(self, s):
        return s._visit_eval_derivative_array(self)

    def _visit_eval_derivative_scalar(self, base):
        # Types are (base: scalar, self: matrix)
        return self.applyfunc(lambda x: base.diff(x))

    def _visit_eval_derivative_array(self, base):
        # Types are (base: array/matrix, self: matrix)
        from sympy import derive_by_array
        return derive_by_array(base, self)

    def integrate(self, *args, **kwargs):
        """Integrate each element of the matrix.  ``args`` will
        be passed to the ``integrate`` function.

        Examples
        ========

        >>> from sympy.matrices import Matrix
        >>> from sympy.abc import x, y
        >>> M = Matrix([[x, y], [1, 0]])
        >>> M.integrate((x, ))
        Matrix([
        [x**2/2, x*y],
        [     x,   0]])
        >>> M.integrate((x, 0, 2))
        Matrix([
        [2, 2*y],
        [2,   0]])

        See Also
        ========

        limit
        diff
        """
        return self.applyfunc(lambda x: x.integrate(*args, **kwargs))

    def jacobian(self, X):
        """Calculates the Jacobian matrix (derivative of a vector-valued function).

        Parameters
        ==========

        ``self`` : vector of expressions representing functions f_i(x_1, ..., x_n).
        X : the variables x_1, ..., x_n, in order; it can be a list or a Matrix

        Both ``self`` and X can be a row or a column matrix in any order
        (i.e., jacobian() should always work).

        Examples
        ========

        >>> from sympy import sin, cos, Matrix
        >>> from sympy.abc import rho, phi
        >>> X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
        >>> Y = Matrix([rho, phi])
        >>> X.jacobian(Y)
        Matrix([
        [cos(phi), -rho*sin(phi)],
        [sin(phi),  rho*cos(phi)],
        [   2*rho,             0]])
        >>> X = Matrix([rho*cos(phi), rho*sin(phi)])
        >>> X.jacobian(Y)
        Matrix([
        [cos(phi), -rho*sin(phi)],
        [sin(phi),  rho*cos(phi)]])

        See Also
        ========

        hessian
        wronskian
        """
        if not isinstance(X, MatrixBase):
            X = self._new(X)
        # Both X and ``self`` can be a row or a column matrix, so we need to make
        # sure all valid combinations work, but everything else fails:
        if self.shape[0] == 1:
            m = self.shape[1]
        elif self.shape[1] == 1:
            m = self.shape[0]
        else:
            raise TypeError("``self`` must be a row or a column matrix")
        if X.shape[0] == 1:
            n = X.shape[1]
        elif X.shape[1] == 1:
            n = X.shape[0]
        else:
            raise TypeError("X must be a row or a column matrix")

        # m is the number of functions and n is the number of variables
        # computing the Jacobian is now easy:
        return self._new(m, n, lambda j, i: self[j].diff(X[i]))

    def limit(self, *args):
        """Calculate the limit of each element in the matrix.
        ``args`` will be passed to the ``limit`` function.

        Examples
        ========

        >>> from sympy.matrices import Matrix
        >>> from sympy.abc import x, y
        >>> M = Matrix([[x, y], [1, 0]])
        >>> M.limit(x, 2)
        Matrix([
        [2, y],
        [1, 0]])

        See Also
        ========

        integrate
        diff
        """
        return self.applyfunc(lambda x: x.limit(*args))


# https://github.com/sympy/sympy/pull/12854
class MatrixDeprecated(MatrixCommon):
    """A class to house deprecated matrix methods."""
    def _legacy_array_dot(self, b):
        """Compatibility function for deprecated behavior of ``matrix.dot(vector)``
        """
        from .dense import Matrix

        if not isinstance(b, MatrixBase):
            if is_sequence(b):
                if len(b) != self.cols and len(b) != self.rows:
                    raise ShapeError(
                        "Dimensions incorrect for dot product: %s, %s" % (
                            self.shape, len(b)))
                return self.dot(Matrix(b))
            else:
                raise TypeError(
                    "`b` must be an ordered iterable or Matrix, not %s." %
                    type(b))

        mat = self
        if mat.cols == b.rows:
            if b.cols != 1:
                mat = mat.T
                b = b.T
            prod = flatten((mat * b).tolist())
            return prod
        if mat.cols == b.cols:
            return mat.dot(b.T)
        elif mat.rows == b.rows:
            return mat.T.dot(b)
        else:
            raise ShapeError("Dimensions incorrect for dot product: %s, %s" % (
                self.shape, b.shape))


    def berkowitz_charpoly(self, x=Dummy('lambda'), simplify=_simplify):
        return self.charpoly(x=x)

    def berkowitz_det(self):
        """Computes determinant using Berkowitz method.

        See Also
        ========

        det
        berkowitz
        """
        return self.det(method='berkowitz')

    def berkowitz_eigenvals(self, **flags):
        """Computes eigenvalues of a Matrix using Berkowitz method.

        See Also
        ========

        berkowitz
        """
        return self.eigenvals(**flags)

    def berkowitz_minors(self):
        """Computes principal minors using Berkowitz method.

        See Also
        ========

        berkowitz
        """
        sign, minors = self.one, []

        for poly in self.berkowitz():
            minors.append(sign * poly[-1])
            sign = -sign

        return tuple(minors)

    def berkowitz(self):
        from sympy.matrices import zeros
        berk = ((1,),)
        if not self:
            return berk

        if not self.is_square:
            raise NonSquareMatrixError()

        A, N = self, self.rows
        transforms = [0] * (N - 1)

        for n in range(N, 1, -1):
            T, k = zeros(n + 1, n), n - 1

            R, C = -A[k, :k], A[:k, k]
            A, a = A[:k, :k], -A[k, k]

            items = [C]

            for i in range(0, n - 2):
                items.append(A * items[i])

            for i, B in enumerate(items):
                items[i] = (R * B)[0, 0]

            items = [self.one, a] + items

            for i in range(n):
                T[i:, i] = items[:n - i + 1]

            transforms[k - 1] = T

        polys = [self._new([self.one, -A[0, 0]])]

        for i, T in enumerate(transforms):
            polys.append(T * polys[i])

        return berk + tuple(map(tuple, polys))

    def cofactorMatrix(self, method="berkowitz"):
        return self.cofactor_matrix(method=method)

    def det_bareis(self):
        return _det_bareiss(self)

    def det_LU_decomposition(self):
        """Compute matrix determinant using LU decomposition


        Note that this method fails if the LU decomposition itself
        fails. In particular, if the matrix has no inverse this method
        will fail.

        TODO: Implement algorithm for sparse matrices (SFF),
        http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.

        See Also
        ========


        det
        det_bareiss
        berkowitz_det
        """
        return self.det(method='lu')

    def jordan_cell(self, eigenval, n):
        return self.jordan_block(size=n, eigenvalue=eigenval)

    def jordan_cells(self, calc_transformation=True):
        P, J = self.jordan_form()
        return P, J.get_diag_blocks()

    def minorEntry(self, i, j, method="berkowitz"):
        return self.minor(i, j, method=method)

    def minorMatrix(self, i, j):
        return self.minor_submatrix(i, j)

    def permuteBkwd(self, perm):
        """Permute the rows of the matrix with the given permutation in reverse."""
        return self.permute_rows(perm, direction='backward')

    def permuteFwd(self, perm):
        """Permute the rows of the matrix with the given permutation."""
        return self.permute_rows(perm, direction='forward')


class MatrixBase(MatrixDeprecated,
                 MatrixCalculus,
                 MatrixEigen,
                 MatrixCommon):
    """Base class for matrix objects."""
    # Added just for numpy compatibility
    __array_priority__ = 11

    is_Matrix = True
    _class_priority = 3
    _sympify = staticmethod(sympify)
    zero = S.Zero
    one = S.One

    # Mutable:
    __hash__ = None  # type: ignore

    # Defined here the same as on Basic.

    # We don't define _repr_png_ here because it would add a large amount of
    # data to any notebook containing SymPy expressions, without adding
    # anything useful to the notebook. It can still be enabled manually, e.g.,
    # for the qtconsole, with init_printing().
    def _repr_latex_(self):
        """
        IPython/Jupyter LaTeX printing

        To change the behavior of this (e.g., pass in some settings to LaTeX),
        use init_printing(). init_printing() will also enable LaTeX printing
        for built in numeric types like ints and container types that contain
        SymPy objects, like lists and dictionaries of expressions.
        """
        from sympy.printing.latex import latex
        s = latex(self, mode='plain')
        return "$\\displaystyle %s$" % s

    _repr_latex_orig = _repr_latex_  # type: Any

    def __array__(self, dtype=object):
        from .dense import matrix2numpy
        return matrix2numpy(self, dtype=dtype)

    def __getattr__(self, attr):
        if attr in ('diff', 'integrate', 'limit'):
            def doit(*args):
                item_doit = lambda item: getattr(item, attr)(*args)
                return self.applyfunc(item_doit)

            return doit
        else:
            raise AttributeError(
                "%s has no attribute %s." % (self.__class__.__name__, attr))

    def __len__(self):
        """Return the number of elements of ``self``.

        Implemented mainly so bool(Matrix()) == False.
        """
        return self.rows * self.cols

    def __mathml__(self):
        mml = ""
        for i in range(self.rows):
            mml += "<matrixrow>"
            for j in range(self.cols):
                mml += self[i, j].__mathml__()
            mml += "</matrixrow>"
        return "<matrix>" + mml + "</matrix>"


    def _diagonal_solve(self, rhs):
        """Helper function of function diagonal_solve, without the error
        checks, to be used privately.
        """
        return self._new(
            rhs.rows, rhs.cols, lambda i, j: rhs[i, j] / self[i, i])

    def _matrix_pow_by_jordan_blocks(self, num, dotprodsimp=None):
        from sympy.matrices import diag, MutableMatrix
        from sympy import binomial

        def jordan_cell_power(jc, n):
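            # For a Jordan block jc = l*I + N (N the nilpotent shift), the
            # closed form of jc**n has binomial(n, i) * l**(n - i) on the
            # i-th superdiagonal: fill the first row, then copy each entry
            # down its diagonal.  A zero eigenvalue needs care since 0**n and
            # binomial(n, i) only make sense for nonnegative integer n.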
            N = jc.shape[0]
            l = jc[0,0]
            if l.is_zero:
                if N == 1 and n.is_nonnegative:
                    jc[0,0] = l**n
                elif not (n.is_integer and n.is_nonnegative):
                    raise NonInvertibleMatrixError("Non-invertible matrix can only be raised to a nonnegative integer")
                else:
                    for i in range(N):
                        jc[0,i] = KroneckerDelta(i, n)
            else:
                for i in range(N):
                    bn = binomial(n, i)
                    if isinstance(bn, binomial):
                        bn = bn._eval_expand_func()
                    jc[0,i] = l**(n-i)*bn
            for i in range(N):
                for j in range(1, N-i):
                    jc[j, i+j] = jc[j-1, i+j-1]

        P, J = self.jordan_form(dotprodsimp=dotprodsimp)
        jordan_cells = J.get_diag_blocks()
        # Make sure jordan_cells matrices are mutable:
        jordan_cells = [MutableMatrix(j) for j in jordan_cells]
        for j in jordan_cells:
            jordan_cell_power(j, num)
        return self._new(P.multiply(diag(*jordan_cells), dotprodsimp=dotprodsimp)
                .multiply(P.inv(dotprodsimp=dotprodsimp), dotprodsimp=dotprodsimp))

    def __repr__(self):
        return sstr(self)

    def __str__(self):
        if self.rows == 0 or self.cols == 0:
            return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
        return "Matrix(%s)" % str(self.tolist())

    def _format_str(self, printer=None):
        if not printer:
            from sympy.printing.str import StrPrinter
            printer = StrPrinter()
        # Handle zero dimensions:
        if self.rows == 0 or self.cols == 0:
            return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
        if self.rows == 1:
            return "Matrix([%s])" % self.table(printer, rowsep=',\n')
        return "Matrix([\n%s])" % self.table(printer, rowsep=',\n')

    @classmethod
    def irregular(cls, ntop, *matrices, **kwargs):
      """Return a matrix filled by the given matrices which
      are listed in order of appearance from left to right, top to
      bottom as they first appear in the matrix. They must fill the
      matrix completely.

      Examples
      ========

      >>> from sympy import ones, Matrix
      >>> Matrix.irregular(3, ones(2,1), ones(3,3)*2, ones(2,2)*3,
      ...   ones(1,1)*4, ones(2,2)*5, ones(1,2)*6, ones(1,2)*7)
      Matrix([
        [1, 2, 2, 2, 3, 3],
        [1, 2, 2, 2, 3, 3],
        [4, 2, 2, 2, 5, 5],
        [6, 6, 7, 7, 5, 5]])
      """
      from sympy.core.compatibility import as_int
      ntop = as_int(ntop)
      # make sure we are working with explicit matrices
      b = [i.as_explicit() if hasattr(i, 'as_explicit') else i
          for i in matrices]
      q = list(range(len(b)))
      dat = [i.rows for i in b]
      active = [q.pop(0) for _ in range(ntop)]
      cols = sum([b[i].cols for i in active])
      rows = []
      while any(dat):
          r = []
          for a, j in enumerate(active):
              r.extend(b[j][-dat[j], :])
              dat[j] -= 1
              if dat[j] == 0 and q:
                  active[a] = q.pop(0)
          if len(r) != cols:
            raise ValueError(filldedent('''
                Matrices provided do not appear to fill
                the space completely.'''))
          rows.append(r)
      return cls._new(rows)

    @classmethod
    def _handle_creation_inputs(cls, *args, **kwargs):
        """Return the number of rows, cols and flat matrix elements.

        Examples
        ========

        >>> from sympy import Matrix, I

        Matrix can be constructed as follows:

        * from a nested list of iterables

        >>> Matrix(((1, 2+I), (3, 4)))
        Matrix([
        [1, 2 + I],
        [3,     4]])

        * from un-nested iterable (interpreted as a column)

        >>> Matrix([1, 2])
        Matrix([
        [1],
        [2]])

        * from un-nested iterable with dimensions

        >>> Matrix(1, 2, [1, 2])
        Matrix([[1, 2]])

        * from no arguments (a 0 x 0 matrix)

        >>> Matrix()
        Matrix(0, 0, [])

        * from a rule

        >>> Matrix(2, 2, lambda i, j: i/(j + 1))
        Matrix([
        [0,   0],
        [1, 1/2]])

        See Also
        ========

        irregular - filling a matrix with irregular blocks
        """
        from sympy.matrices.sparse import SparseMatrix
        from sympy.matrices.expressions.matexpr import MatrixSymbol
        from sympy.matrices.expressions.blockmatrix import BlockMatrix
        from sympy.utilities.iterables import reshape

        flat_list = None

        if len(args) == 1:
            # Matrix(SparseMatrix(...))
            if isinstance(args[0], SparseMatrix):
                return args[0].rows, args[0].cols, flatten(args[0].tolist())

            # Matrix(Matrix(...))
            elif isinstance(args[0], MatrixBase):
                return args[0].rows, args[0].cols, args[0]._mat

            # Matrix(MatrixSymbol('X', 2, 2))
            elif isinstance(args[0], Basic) and args[0].is_Matrix:
                return args[0].rows, args[0].cols, args[0].as_explicit()._mat

            # Matrix(numpy.ones((2, 2)))
            elif hasattr(args[0], "__array__"):
                # NumPy array or matrix or some other object that implements
                # __array__. So let's first use this method to get a
                # numpy.array() and then make a python list out of it.
                arr = args[0].__array__()
                if len(arr.shape) == 2:
                    rows, cols = arr.shape[0], arr.shape[1]
                    flat_list = [cls._sympify(i) for i in arr.ravel()]
                    return rows, cols, flat_list
                elif len(arr.shape) == 1:
                    rows, cols = arr.shape[0], 1
                    flat_list = [cls.zero] * rows
                    for i in range(len(arr)):
                        flat_list[i] = cls._sympify(arr[i])
                    return rows, cols, flat_list
                else:
                    raise NotImplementedError(
                        "SymPy supports just 1D and 2D matrices")

            # Matrix([1, 2, 3]) or Matrix([[1, 2], [3, 4]])
            elif is_sequence(args[0]) \
                    and not isinstance(args[0], DeferredVector):
                dat = list(args[0])
                evaluate = kwargs.get('evaluate', True)
                # the lambdas below close over ``evaluate``
                ismat = lambda i: isinstance(i, MatrixBase) and (
                    evaluate or
                    isinstance(i, BlockMatrix) or
                    isinstance(i, MatrixSymbol))
                raw = lambda i: is_sequence(i) and not ismat(i)
                if evaluate:
                    def do(x):
                        # make Block and Symbol explicit
                        if isinstance(x, (list, tuple)):
                            return type(x)([do(i) for i in x])
                        if isinstance(x, BlockMatrix) or \
                                isinstance(x, MatrixSymbol) and \
                                all(_.is_Integer for _ in x.shape):
                            return x.as_explicit()
                        return x
                    dat = do(dat)

                if dat == [] or dat == [[]]:
                    rows = cols = 0
                    flat_list = []
                elif not any(raw(i) or ismat(i) for i in dat):
                    # a column as a list of values
                    flat_list = [cls._sympify(i) for i in dat]
                    rows = len(flat_list)
                    cols = 1 if rows else 0
                elif evaluate and all(ismat(i) for i in dat):
                    # a column as a list of matrices
                    ncol = set(i.cols for i in dat if any(i.shape))
                    if ncol:
                        if len(ncol) != 1:
                            raise ValueError('mismatched dimensions')
                        flat_list = [_ for i in dat for r in i.tolist() for _ in r]
                        cols = ncol.pop()
                        rows = len(flat_list)//cols
                    else:
                        rows = cols = 0
                        flat_list = []
                elif evaluate and any(ismat(i) for i in dat):
                    ncol = set()
                    flat_list = []
                    for i in dat:
                        if ismat(i):
                            flat_list.extend(
                                [k for j in i.tolist() for k in j])
                            if any(i.shape):
                                ncol.add(i.cols)
                        elif raw(i):
                            if i:
                                ncol.add(len(i))
                                flat_list.extend(i)
                        else:
                            ncol.add(1)
                            flat_list.append(i)
                        if len(ncol) > 1:
                            raise ValueError('mismatched dimensions')
                    cols = ncol.pop()
                    rows = len(flat_list)//cols
                else:
                    # list of lists; each sublist is a logical row
                    # which might consist of many rows if the values in
                    # the row are matrices
                    flat_list = []
                    ncol = set()
                    rows = cols = 0
                    for row in dat:
                        if not is_sequence(row) and \
                                not getattr(row, 'is_Matrix', False):
                            raise ValueError('expecting list of lists')
                        if not row:
                            continue
                        if evaluate and all(ismat(i) for i in row):
                            r, c, flatT = cls._handle_creation_inputs(
                                [i.T for i in row])
                            T = reshape(flatT, [c])
                            flat = [T[i][j] for j in range(c) for i in range(r)]
                            r, c = c, r
                        else:
                            r = 1
                            if getattr(row, 'is_Matrix', False):
                                c = 1
                                flat = [row]
                            else:
                                c = len(row)
                                flat = [cls._sympify(i) for i in row]
                        ncol.add(c)
                        if len(ncol) > 1:
                            raise ValueError('mismatched dimensions')
                        flat_list.extend(flat)
                        rows += r
                    cols = ncol.pop() if ncol else 0

        elif len(args) == 3:
            rows = as_int(args[0])
            cols = as_int(args[1])

            if rows < 0 or cols < 0:
                raise ValueError("Cannot create a {} x {} matrix. "
                                 "Both dimensions must be non-negative".format(rows, cols))

            # Matrix(2, 2, lambda i, j: i+j)
            if isinstance(args[2], Callable):
                op = args[2]
                flat_list = []
                for i in range(rows):
                    flat_list.extend(
                        [cls._sympify(op(cls._sympify(i), cls._sympify(j)))
                         for j in range(cols)])

            # Matrix(2, 2, [1, 2, 3, 4])
            elif is_sequence(args[2]):
                flat_list = args[2]
                if len(flat_list) != rows * cols:
                    raise ValueError(
                        'List length should be equal to rows*columns')
                flat_list = [cls._sympify(i) for i in flat_list]


        # Matrix()
        elif len(args) == 0:
            # Empty Matrix
            rows = cols = 0
            flat_list = []

        if flat_list is None:
            raise TypeError(filldedent('''
                Data type not understood; expecting list of lists
                or lists of values.'''))

        return rows, cols, flat_list

    def _setitem(self, key, value):
        """Helper to set value at location given by key.

        Examples
        ========

        >>> from sympy import Matrix, I, zeros, ones
        >>> m = Matrix(((1, 2+I), (3, 4)))
        >>> m
        Matrix([
        [1, 2 + I],
        [3,     4]])
        >>> m[1, 0] = 9
        >>> m
        Matrix([
        [1, 2 + I],
        [9,     4]])
        >>> m[1, 0] = [[0, 1]]
        >>> m
        Matrix([
        [1, 2 + I],
        [0,     1]])

        To replace row r you assign to position r*m where m
        is the number of columns:

        >>> M = zeros(4)
        >>> m = M.cols
        >>> M[3*m] = ones(1, m)*2; M
        Matrix([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [2, 2, 2, 2]])

        And to replace column c you can assign to position c:

        >>> M[2] = ones(m, 1)*4; M
        Matrix([
        [0, 0, 4, 0],
        [0, 0, 4, 0],
        [0, 0, 4, 0],
        [2, 2, 4, 2]])
        """
        from .dense import Matrix

        is_slice = isinstance(key, slice)
        i, j = key = self.key2ij(key)
        is_mat = isinstance(value, MatrixBase)
        if type(i) is slice or type(j) is slice:
            if is_mat:
                self.copyin_matrix(key, value)
                return
            if not isinstance(value, Expr) and is_sequence(value):
                self.copyin_list(key, value)
                return
            raise ValueError('unexpected value: %s' % value)
        else:
            if (not is_mat and
                    not isinstance(value, Basic) and is_sequence(value)):
                value = Matrix(value)
                is_mat = True
            if is_mat:
                if is_slice:
                    key = (slice(*divmod(i, self.cols)),
                           slice(*divmod(j, self.cols)))
                else:
                    key = (slice(i, i + value.rows),
                           slice(j, j + value.cols))
                self.copyin_matrix(key, value)
            else:
                return i, j, self._sympify(value)
            return

    def add(self, b):
        """Return self + b """
        return self + b

    def cholesky_solve(self, rhs, dotprodsimp=None):
        """Solves ``Ax = B`` using Cholesky decomposition,
        for a general square non-singular matrix.
        For a non-square matrix with rows > cols,
        the least squares solution is returned.

        Parameters
        ==========

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.
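
        Examples
        ========

        A small symmetric positive-definite system, solved exactly:

        >>> from sympy import Matrix
        >>> A = Matrix([[4, 2], [2, 3]])
        >>> b = Matrix([6, 5])
        >>> A.cholesky_solve(b)
        Matrix([
        [1],
        [1]])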

        See Also
        ========

        lower_triangular_solve
        upper_triangular_solve
        gauss_jordan_solve
        diagonal_solve
        LDLsolve
        LUsolve
        QRsolve
        pinv_solve
        """
        hermitian = True
        if self.is_symmetric():
            hermitian = False
            L = self._cholesky(hermitian=hermitian, dotprodsimp=dotprodsimp)
        elif self.is_hermitian:
            L = self._cholesky(hermitian=hermitian, dotprodsimp=dotprodsimp)
        elif self.rows >= self.cols:
            L = (self.H * self)._cholesky(hermitian=hermitian, dotprodsimp=dotprodsimp)
            rhs = self.H * rhs
        else:
            raise NotImplementedError('Under-determined System. '
                                      'Try M.gauss_jordan_solve(rhs)')
        Y = L._lower_triangular_solve(rhs, dotprodsimp=dotprodsimp)
        if hermitian:
            return (L.H)._upper_triangular_solve(Y, dotprodsimp=dotprodsimp)
        else:
            return (L.T)._upper_triangular_solve(Y, dotprodsimp=dotprodsimp)

    def cholesky(self, hermitian=True, dotprodsimp=None):
        """Returns the Cholesky-type decomposition L of a matrix A
        such that L * L.H == A if hermitian flag is True,
        or L * L.T == A if hermitian is False.

        A must be a Hermitian positive-definite matrix if hermitian is True,
        or a symmetric matrix if it is False.

        Parameters
        ==========

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.

        Examples
        ========

        >>> from sympy.matrices import Matrix
        >>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
        >>> A.cholesky()
        Matrix([
        [ 5, 0, 0],
        [ 3, 3, 0],
        [-1, 1, 3]])
        >>> A.cholesky() * A.cholesky().T
        Matrix([
        [25, 15, -5],
        [15, 18,  0],
        [-5,  0, 11]])

        The matrix can have complex entries:

        >>> from sympy import I
        >>> A = Matrix(((9, 3*I), (-3*I, 5)))
        >>> A.cholesky()
        Matrix([
        [ 3, 0],
        [-I, 2]])
        >>> A.cholesky() * A.cholesky().H
        Matrix([
        [   9, 3*I],
        [-3*I,   5]])

        Non-hermitian Cholesky-type decomposition may be useful when the
        matrix is not positive-definite.

        >>> A = Matrix([[1, 2], [2, 1]])
        >>> L = A.cholesky(hermitian=False)
        >>> L
        Matrix([
        [1,         0],
        [2, sqrt(3)*I]])
        >>> L*L.T == A
        True

        See Also
        ========

        LDLdecomposition
        LUdecomposition
        QRdecomposition
        """

        if not self.is_square:
            raise NonSquareMatrixError("Matrix must be square.")
        if hermitian and not self.is_hermitian:
            raise ValueError("Matrix must be Hermitian.")
        if not hermitian and not self.is_symmetric():
            raise ValueError("Matrix must be symmetric.")
        return self._cholesky(hermitian=hermitian, dotprodsimp=dotprodsimp)

    def condition_number(self, dotprodsimp=None):
        """Returns the condition number of a matrix.

        This is the maximum singular value divided by the minimum singular
        value.

        Parameters
        ==========

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.

        Examples
        ========

        >>> from sympy import Matrix, S
        >>> A = Matrix([[1, 0, 0], [0, 10, 0], [0, 0, S.One/10]])
        >>> A.condition_number()
        100

        See Also
        ========

        singular_values
        """

        if not self:
            return self.zero
        singularvalues = self.singular_values(dotprodsimp=dotprodsimp)
        return Max(*singularvalues) / Min(*singularvalues)

    def copy(self):
        """
        Returns a copy of the matrix.

        Examples
        ========

        >>> from sympy import Matrix
        >>> A = Matrix(2, 2, [1, 2, 3, 4])
        >>> A.copy()
        Matrix([
        [1, 2],
        [3, 4]])

        """
        return self._new(self.rows, self.cols, self._mat)

    def cross(self, b):
        r"""
        Return the cross product of ``self`` and ``b`` relaxing the condition
        of compatible dimensions: if each has 3 elements, a matrix of the
        same type and shape as ``self`` will be returned. If ``b`` has the same
        shape as ``self`` then common identities for the cross product (like
        `a \times b = - b \times a`) will hold.

        Parameters
        ==========

        b : 3x1 or 1x3 Matrix
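
        Examples
        ========

        The cross product of two 3-element column vectors:

        >>> from sympy import Matrix
        >>> M = Matrix([1, 2, 3])
        >>> V = Matrix([4, 5, 6])
        >>> M.cross(V)
        Matrix([
        [-3],
        [ 6],
        [-3]])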

        See Also
        ========

        dot
        multiply
        multiply_elementwise
        """
        if not is_sequence(b):
            raise TypeError(
                "`b` must be an ordered iterable or Matrix, not %s." %
                type(b))
        if not (self.rows * self.cols == b.rows * b.cols == 3):
            raise ShapeError("Dimensions incorrect for cross product: %s x %s" %
                             ((self.rows, self.cols), (b.rows, b.cols)))
        else:
            return self._new(self.rows, self.cols, (
                (self[1] * b[2] - self[2] * b[1]),
                (self[2] * b[0] - self[0] * b[2]),
                (self[0] * b[1] - self[1] * b[0])))

    @property
    def D(self):
        """Return Dirac conjugate (if ``self.rows == 4``).

        Examples
        ========

        >>> from sympy import Matrix, I, eye
        >>> m = Matrix((0, 1 + I, 2, 3))
        >>> m.D
        Matrix([[0, 1 - I, -2, -3]])
        >>> m = (eye(4) + I*eye(4))
        >>> m[0, 3] = 2
        >>> m.D
        Matrix([
        [1 - I,     0,      0,      0],
        [    0, 1 - I,      0,      0],
        [    0,     0, -1 + I,      0],
        [    2,     0,      0, -1 + I]])

        If the matrix does not have 4 rows an AttributeError will be raised,
        since this property is only defined for matrices with 4 rows.

        >>> Matrix(eye(2)).D
        Traceback (most recent call last):
        ...
        AttributeError: Matrix has no attribute D.

        See Also
        ========

        sympy.matrices.common.MatrixCommon.conjugate: By-element conjugation
        sympy.matrices.common.MatrixCommon.H: Hermite conjugation
        """
        from sympy.physics.matrices import mgamma
        if self.rows != 4:
            # In Python 3.2, properties can only raise an AttributeError
            # so we can't raise a ShapeError -- see commit which added the
            # first line of this inline comment. Also, there is no need
            # for a message since MatrixBase will raise the AttributeError
            raise AttributeError
        return self.H * mgamma(0)

    def diagonal_solve(self, rhs):
        """Solves ``Ax = B`` efficiently, where A is a diagonal Matrix,
        with non-zero diagonal entries.

        Examples
        ========

        >>> from sympy.matrices import Matrix, eye
        >>> A = eye(2)*2
        >>> B = Matrix([[1, 2], [3, 4]])
        >>> A.diagonal_solve(B) == B/2
        True

        See Also
        ========

        lower_triangular_solve
        upper_triangular_solve
        gauss_jordan_solve
        cholesky_solve
        LDLsolve
        LUsolve
        QRsolve
        pinv_solve
        """
        if not self.is_diagonal():
            raise TypeError("Matrix should be diagonal")
        if rhs.rows != self.rows:
            raise TypeError("Size mis-match")
        return self._diagonal_solve(rhs)

    def dot(self, b, hermitian=None, conjugate_convention=None):
        """Return the dot or inner product of two vectors of equal length.
        Here ``self`` must be a ``Matrix`` of size 1 x n or n x 1, and ``b``
        must be either a matrix of size 1 x n, n x 1, or a list/tuple of length n.
        A scalar is returned.

        By default, ``dot`` does not conjugate ``self`` or ``b``, even if
        there are complex entries. Set ``hermitian=True`` (and optionally a
        ``conjugate_convention``) to compute the hermitian inner product.

        If ``conjugate_convention`` is ``"left"``, ``"math"`` or ``"maths"``,
        the conjugate of the first vector (``self``) is used.  If ``"right"``
        or ``"physics"`` is specified, the conjugate of the second vector ``b`` is used.

        Examples
        ========

        >>> from sympy import Matrix
        >>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        >>> v = Matrix([1, 1, 1])
        >>> M.row(0).dot(v)
        6
        >>> M.col(0).dot(v)
        12
        >>> v = [3, 2, 1]
        >>> M.row(0).dot(v)
        10

        >>> from sympy import I
        >>> q = Matrix([1*I, 1*I, 1*I])
        >>> q.dot(q, hermitian=False)
        -3

        >>> q.dot(q, hermitian=True)
        3

        >>> q1 = Matrix([1, 1, 1*I])
        >>> q.dot(q1, hermitian=True, conjugate_convention="maths")
        1 - 2*I
        >>> q.dot(q1, hermitian=True, conjugate_convention="physics")
        1 + 2*I


        See Also
        ========

        cross
        multiply
        multiply_elementwise
        """
        from .dense import Matrix

        if not isinstance(b, MatrixBase):
            if is_sequence(b):
                if len(b) != self.cols and len(b) != self.rows:
                    raise ShapeError(
                        "Dimensions incorrect for dot product: %s, %s" % (
                            self.shape, len(b)))
                return self.dot(Matrix(b))
            else:
                raise TypeError(
                    "`b` must be an ordered iterable or Matrix, not %s." %
                    type(b))

        mat = self
        if (1 not in mat.shape) or (1 not in b.shape):
            SymPyDeprecationWarning(
                feature="Dot product of non row/column vectors",
                issue=13815,
                deprecated_since_version="1.2",
                useinstead="* to take matrix products").warn()
            return mat._legacy_array_dot(b)
        if len(mat) != len(b):
            raise ShapeError("Dimensions incorrect for dot product: %s, %s" % (self.shape, b.shape))
        n = len(mat)
        if mat.shape != (1, n):
            mat = mat.reshape(1, n)
        if b.shape != (n, 1):
            b = b.reshape(n, 1)

        # Now ``mat`` is a row vector and ``b`` is a column vector.

        # If only conjugate_convention is passed then automatically set
        # hermitian to True. If hermitian is True but conjugate_convention
        # was not passed then default it to "maths".

        if conjugate_convention is not None and hermitian is None:
            hermitian = True
        if hermitian and conjugate_convention is None:
            conjugate_convention = "maths"

        if hermitian:
            if conjugate_convention in ("maths", "left", "math"):
                mat = mat.conjugate()
            elif conjugate_convention in ("physics", "right"):
                b = b.conjugate()
            else:
                raise ValueError("Unknown conjugate_convention was entered."
                                 " conjugate_convention must be one of the"
                                 " following: math, maths, left, physics or right.")
        return (mat * b)[0]

    def dual(self):
        """Returns the dual of a matrix, which is:

        ``(1/2)*levicivita(i, j, k, l)*M(k, l)`` summed over indices `k` and `l`

        Since the Levi-Civita symbol is antisymmetric under any pairwise
        exchange of indices, the dual of a symmetric matrix is the zero
        matrix. Strictly speaking the dual defined here assumes that the
        'matrix' `M` is a contravariant antisymmetric second rank tensor,
        so that the dual is a covariant second rank tensor.

        """
        from sympy import LeviCivita
        from sympy.matrices import zeros

        M, n = self[:, :], self.rows
        work = zeros(n)
        if self.is_symmetric():
            return work

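        # Entries with both indices nonzero only involve the first row of M
        # through LeviCivita(i, j, 0, k); antisymmetry fills work[j, i].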
        for i in range(1, n):
            for j in range(1, n):
                acum = 0
                for k in range(1, n):
                    acum += LeviCivita(i, j, 0, k) * M[0, k]
                work[i, j] = acum
                work[j, i] = -acum

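        # Entries in the first row/column sum over the lower-right block of
        # M; the factor of 2 accounts for the antisymmetric double counting.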
        for l in range(1, n):
            acum = 0
            for a in range(1, n):
                for b in range(1, n):
                    acum += LeviCivita(0, l, a, b) * M[a, b]
            acum /= 2
            work[0, l] = -acum
            work[l, 0] = acum

        return work

    def _eval_matrix_exp_jblock(self):
        """A helper function to compute an exponential of a Jordan block
        matrix

        Examples
        ========

        >>> from sympy import Symbol, Matrix
        >>> l = Symbol('lamda')

        A trivial example of a 1x1 Jordan block:

        >>> m = Matrix.jordan_block(1, l)
        >>> m._eval_matrix_exp_jblock()
        Matrix([[exp(lamda)]])

        An example of a 3x3 Jordan block:

        >>> m = Matrix.jordan_block(3, l)
        >>> m._eval_matrix_exp_jblock()
        Matrix([
        [exp(lamda), exp(lamda), exp(lamda)/2],
        [         0, exp(lamda),   exp(lamda)],
        [         0,          0,   exp(lamda)]])

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Matrix_function#Jordan_decomposition
        """
        size = self.rows
        l = self[0, 0]
        exp_l = exp(l)

        bands = {i: exp_l / factorial(i) for i in range(size)}

        from .sparsetools import banded
        return self.__class__(banded(size, bands))

    def exp(self, dotprodsimp=None):
        """Return the exponential of a square matrix

        Examples
        ========

        >>> from sympy import Symbol, Matrix

        >>> t = Symbol('t')
        >>> m = Matrix([[0, 1], [-1, 0]]) * t
        >>> m.exp()
        Matrix([
        [    exp(I*t)/2 + exp(-I*t)/2, -I*exp(I*t)/2 + I*exp(-I*t)/2],
        [I*exp(I*t)/2 - I*exp(-I*t)/2,      exp(I*t)/2 + exp(-I*t)/2]])

        Parameters
        ==========

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.
        """
        if not self.is_square:
            raise NonSquareMatrixError(
                "Exponentiation is valid only for square matrices")
        try:
            P, J = self.jordan_form()
            cells = J.get_diag_blocks()
        except MatrixError:
            raise NotImplementedError(
                "Exponentiation is implemented only for matrices for which the Jordan normal form can be computed")

        blocks = [cell._eval_matrix_exp_jblock() for cell in cells]
        from sympy.matrices import diag
        from sympy import re
        eJ = diag(*blocks)
        ret = P.multiply(eJ, dotprodsimp=dotprodsimp).multiply(P.inv(), dotprodsimp=dotprodsimp)
        if all(value.is_real for value in self.values()):
            return type(self)(re(ret))
        else:
            return type(self)(ret)

    def _eval_matrix_log_jblock(self):
        """Helper function to compute logarithm of a jordan block.

        Examples
        ========

        >>> from sympy import Symbol, Matrix
        >>> l = Symbol('lamda')

        A trivial example of a 1x1 Jordan block:

        >>> m = Matrix.jordan_block(1, l)
        >>> m._eval_matrix_log_jblock()
        Matrix([[log(lamda)]])

        An example of a 3x3 Jordan block:

        >>> m = Matrix.jordan_block(3, l)
        >>> m._eval_matrix_log_jblock()
        Matrix([
        [log(lamda),    1/lamda, -1/(2*lamda**2)],
        [         0, log(lamda),         1/lamda],
        [         0,          0,      log(lamda)]])
        """
        size = self.rows
        l = self[0, 0]

        if l.is_zero:
            raise MatrixError(
                'Could not take logarithm or reciprocal for the given '
                'eigenvalue {}'.format(l))

        bands = {0: log(l)}
        for i in range(1, size):
            bands[i] = -((-l) ** -i) / i

        from .sparsetools import banded
        return self.__class__(banded(size, bands))

    def log(self, simplify=cancel):
        """Return the logarithm of a square matrix

        Parameters
        ==========

        simplify : function, bool
            The function to simplify the result with.

            Default is ``cancel``, which is effective to reduce the
            expression growing for taking reciprocals and inverses for
            symbolic matrices.

        Examples
        ========

        >>> from sympy import S, Matrix

        Examples for positive-definite matrices:

        >>> m = Matrix([[1, 1], [0, 1]])
        >>> m.log()
        Matrix([
        [0, 1],
        [0, 0]])

        >>> m = Matrix([[S(5)/4, S(3)/4], [S(3)/4, S(5)/4]])
        >>> m.log()
        Matrix([
        [     0, log(2)],
        [log(2),      0]])

        Examples for non positive-definite matrices:

        >>> m = Matrix([[S(3)/4, S(5)/4], [S(5)/4, S(3)/4]])
        >>> m.log()
        Matrix([
        [         I*pi/2, log(2) - I*pi/2],
        [log(2) - I*pi/2,          I*pi/2]])

        >>> m = Matrix(
        ...     [[0, 0, 0, 1],
        ...      [0, 0, 1, 0],
        ...      [0, 1, 0, 0],
        ...      [1, 0, 0, 0]])
        >>> m.log()
        Matrix([
        [ I*pi/2,       0,       0, -I*pi/2],
        [      0,  I*pi/2, -I*pi/2,       0],
        [      0, -I*pi/2,  I*pi/2,       0],
        [-I*pi/2,       0,       0,  I*pi/2]])
        """
        if not self.is_square:
            raise NonSquareMatrixError(
                "Logarithm is valid only for square matrices")

        try:
            if simplify:
                P, J = simplify(self).jordan_form()
            else:
                P, J = self.jordan_form()

            cells = J.get_diag_blocks()
        except MatrixError:
            raise NotImplementedError(
                "Logarithm is implemented only for matrices for which "
                "the Jordan normal form can be computed")

        blocks = [
            cell._eval_matrix_log_jblock()
            for cell in cells]
        from sympy.matrices import diag
        eJ = diag(*blocks)

        if simplify:
            ret = simplify(P * eJ * simplify(P.inv()))
            ret = self.__class__(ret)
        else:
            ret = P * eJ * P.inv()

        return ret

    def gauss_jordan_solve(self, B, freevar=False, dotprodsimp=None):
        """
        Solves ``Ax = B`` using Gauss Jordan elimination.

        There may be zero, one, or infinite solutions.  If one solution
        exists, it will be returned. If infinite solutions exist, they will
        be returned parametrically. If no solutions exist, a ValueError
        will be raised.

        Parameters
        ==========

        B : Matrix
            The right hand side of the equation to be solved for.  Must have
            the same number of rows as matrix A.

        freevar : bool, optional
            If the system is underdetermined (e.g. A has more columns than
            rows), infinite solutions are possible, in terms of arbitrary
            values of free variables. If ``freevar`` is set to ``True``,
            the indices of the free variables in the solution (column
            Matrix) are also returned.

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.

        Returns
        =======

        x : Matrix
            The matrix that will satisfy ``Ax = B``.  Will have as many rows as
            matrix A has columns, and as many columns as matrix B.

        params : Matrix
            If the system is underdetermined (e.g. A has more columns than
            rows), infinite solutions are possible, in terms of arbitrary
            parameters. These arbitrary parameters are returned as params
            Matrix.

        Examples
        ========

        >>> from sympy import Matrix
        >>> A = Matrix([[1, 2, 1, 1], [1, 2, 2, -1], [2, 4, 0, 6]])
        >>> B = Matrix([7, 12, 4])
        >>> sol, params = A.gauss_jordan_solve(B)
        >>> sol
        Matrix([
        [-2*tau0 - 3*tau1 + 2],
        [                 tau0],
        [           2*tau1 + 5],
        [                 tau1]])
        >>> params
        Matrix([
        [tau0],
        [tau1]])
        >>> taus_zeroes = {tau: 0 for tau in params}
        >>> sol_unique = sol.xreplace(taus_zeroes)
        >>> sol_unique
        Matrix([
        [2],
        [0],
        [5],
        [0]])

        >>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
        >>> B = Matrix([3, 6, 9])
        >>> sol, params = A.gauss_jordan_solve(B)
        >>> sol
        Matrix([
        [-1],
        [ 2],
        [ 0]])
        >>> params
        Matrix(0, 1, [])

        >>> A = Matrix([[2, -7], [-1, 4]])
        >>> B = Matrix([[-21, 3], [12, -2]])
        >>> sol, params = A.gauss_jordan_solve(B)
        >>> sol
        Matrix([
        [0, -2],
        [3, -1]])
        >>> params
        Matrix(0, 2, [])

        See Also
        ========

        lower_triangular_solve
        upper_triangular_solve
        cholesky_solve
        diagonal_solve
        LDLsolve
        LUsolve
        QRsolve
        pinv

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Gaussian_elimination

        """
        from sympy.matrices import Matrix, zeros

        cls = self.__class__

        aug = self.hstack(self.copy(), B.copy())
        B_cols = B.cols
        row, col = aug[:, :-B_cols].shape

        # solve by reduced row echelon form
        A, pivots = aug.rref(simplify=True, dotprodsimp=dotprodsimp)
        A, v = A[:, :-B_cols], A[:, -B_cols:]
        pivots = list(filter(lambda p: p < col, pivots))
        rank = len(pivots)

        # Bring to block form
        permutation = Matrix(range(col)).T

        for i, c in enumerate(pivots):
            permutation.col_swap(i, c)

        # check for existence of solutions
        # rank of aug Matrix should be equal to rank of coefficient matrix
        if not v[rank:, :].is_zero_matrix:
            raise ValueError("Linear system has no solution")

        # Get index of free symbols (free parameters)
        # non-pivot columns are free variables
        free_var_index = permutation[len(pivots):]

        # Free parameters
        # what are current unnumbered free symbol names?
        name = _uniquely_named_symbol('tau', aug,
            compare=lambda i: str(i).rstrip('1234567890')).name
        gen = numbered_symbols(name)
        tau = Matrix([next(gen) for k in range((col - rank)*B_cols)]).reshape(
            col - rank, B_cols)

        # Full parametric solution
        V = A[:rank, [c for c in range(A.cols) if c not in pivots]]
        vt = v[:rank, :]
        free_sol = tau.vstack(vt - V * tau, tau)

        # Undo permutation
        sol = zeros(col, B_cols)
        for k in range(col):
            sol[permutation[k], :] = free_sol[k, :]

        sol, tau = cls(sol), cls(tau)
        if freevar:
            return sol, tau, free_var_index
        else:
            return sol, tau

    def inv_mod(self, m):
        r"""
        Returns the inverse of the matrix `K` (mod `m`), if it exists.

        Method to find the matrix inverse of `K` (mod `m`) implemented in this function:

        * Compute `\mathrm{adj}(K) = \mathrm{cof}(K)^t`, the adjugate matrix of `K`.

        * Compute `r = 1/\mathrm{det}(K) \pmod m`.

        * `K^{-1} = r\cdot \mathrm{adj}(K) \pmod m`.

        Examples
        ========

        >>> from sympy import Matrix
        >>> A = Matrix(2, 2, [1, 2, 3, 4])
        >>> A.inv_mod(5)
        Matrix([
        [3, 1],
        [4, 2]])
        >>> A.inv_mod(3)
        Matrix([
        [1, 1],
        [0, 1]])

        """
        if not self.is_square:
            raise NonSquareMatrixError()
        N = self.cols
        det_K = self.det()
        det_inv = None

        try:
            det_inv = mod_inverse(det_K, m)
        except ValueError:
            raise NonInvertibleMatrixError('Matrix is not invertible (mod %d)' % m)

        K_adj = self.adjugate()
        K_inv = self.__class__(N, N,
                               [det_inv * K_adj[i, j] % m for i in range(N) for
                                j in range(N)])
        return K_inv

    def inverse_ADJ(self, iszerofunc=_iszero, dotprodsimp=None):
        """Calculates the inverse using the adjugate matrix and a determinant.

        Parameters
        ==========

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.

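        Examples
        ========

        A small illustrative example:

        >>> from sympy import Matrix
        >>> Matrix([[1, 2], [3, 4]]).inverse_ADJ()
        Matrix([
        [ -2,    1],
        [3/2, -1/2]])
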
        See Also
        ========

        inv
        inverse_LU
        inverse_GE
        """
        if not self.is_square:
            raise NonSquareMatrixError("A Matrix must be square to invert.")

        d = self.det(method='berkowitz', dotprodsimp=dotprodsimp)
        zero = d.equals(0)
        if zero is None:
            # if equals() can't decide, will rref be able to?
            ok = self.rref(simplify=True, dotprodsimp=dotprodsimp)[0]
            zero = any(iszerofunc(ok[j, j]) for j in range(ok.rows))
        if zero:
            raise NonInvertibleMatrixError("Matrix det == 0; not invertible.")

        return self.adjugate(dotprodsimp=dotprodsimp) / d

    def inverse_GE(self, iszerofunc=_iszero, dotprodsimp=None):
        """Calculates the inverse using Gaussian elimination.

        Parameters
        ==========

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.

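        Examples
        ========

        A small illustrative example:

        >>> from sympy import Matrix
        >>> Matrix([[1, 2], [2, 3]]).inverse_GE()
        Matrix([
        [-3,  2],
        [ 2, -1]])
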
        See Also
        ========

        inv
        inverse_LU
        inverse_ADJ
        """
        from .dense import Matrix
        if not self.is_square:
            raise NonSquareMatrixError("A Matrix must be square to invert.")

        big = Matrix.hstack(self.as_mutable(), Matrix.eye(self.rows))
        red = big.rref(iszerofunc=iszerofunc, simplify=True, dotprodsimp=dotprodsimp)[0]
        if any(iszerofunc(red[j, j]) for j in range(red.rows)):
            raise NonInvertibleMatrixError("Matrix det == 0; not invertible.")

        return self._new(red[:, big.rows:])

    def inverse_LU(self, iszerofunc=_iszero, dotprodsimp=None):
        """Calculates the inverse using LU decomposition.

        Parameters
        ==========

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.

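        Examples
        ========

        A small illustrative example:

        >>> from sympy import Matrix
        >>> Matrix([[4, 3], [6, 3]]).inverse_LU()
        Matrix([
        [-1/2,  1/2],
        [   1, -2/3]])
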
        See Also
        ========

        inv
        inverse_GE
        inverse_ADJ
        """
        if not self.is_square:
            raise NonSquareMatrixError()

        ok = self.rref(simplify=True, dotprodsimp=dotprodsimp)[0]
        if any(iszerofunc(ok[j, j]) for j in range(ok.rows)):
            raise NonInvertibleMatrixError("Matrix det == 0; not invertible.")

        return self.LUsolve(self.eye(self.rows), iszerofunc=_iszero,
                dotprodsimp=dotprodsimp)

    def inv(self, method=None, dotprodsimp=None, **kwargs):
        """
        Return the inverse of a matrix.

        CASE 1: If the matrix is a dense matrix.

        Return the matrix inverse using the method indicated (default
        is Gauss elimination).

        Parameters
        ==========

        method : ('GE', 'LU', or 'ADJ') for dense matrices,
                 ('LDL', 'CH') for sparse

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.

        Notes
        =====

        According to the ``method`` keyword, it calls the appropriate method:

          GE .... inverse_GE(); default
          LU .... inverse_LU()
          ADJ ... inverse_ADJ()

        See Also
        ========

        inverse_LU
        inverse_GE
        inverse_ADJ

        Raises
        ======
        ValueError
            If the determinant of the matrix is zero.

        CASE 2: If the matrix is a sparse matrix.

        Return the matrix inverse using Cholesky or LDL (default).

        kwargs
        ======

        method : ('CH', 'LDL')

        Notes
        =====

        According to the ``method`` keyword, it calls the appropriate method:

          LDL ... inverse_LDL(); default
          CH .... inverse_CH()

        Raises
        ======
        ValueError
            If the determinant of the matrix is zero.

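        Examples
        ========

        A minimal example (dense matrix, default method):

        >>> from sympy import Matrix
        >>> A = Matrix([[2, 1], [1, 1]])
        >>> A.inv()
        Matrix([
        [ 1, -1],
        [-1,  2]])
        >>> A.inv(method='LU') == A.inv(method='ADJ')
        True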
        """
        if not self.is_square:
            raise NonSquareMatrixError()
        if method is not None:
            kwargs['method'] = method
        if dotprodsimp is not None:
            kwargs['dotprodsimp'] = dotprodsimp
        return self._eval_inverse(**kwargs)

    def is_nilpotent(self, dotprodsimp=None):
        """Checks if a matrix is nilpotent.

        A matrix B is nilpotent if for some integer k, B**k is
        a zero matrix.

        Parameters
        ==========

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.

        Examples
        ========

        >>> from sympy import Matrix
        >>> a = Matrix([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
        >>> a.is_nilpotent()
        True

        >>> a = Matrix([[1, 0, 1], [1, 0, 0], [1, 1, 0]])
        >>> a.is_nilpotent()
        False
        """
        if not self:
            return True
        if not self.is_square:
            raise NonSquareMatrixError(
                "Nilpotency is valid only for square matrices")
        x = _uniquely_named_symbol('x', self)
        p = self.charpoly(x, dotprodsimp=dotprodsimp)
        if p.args[0] == x ** self.rows:
            return True
        return False

    def key2bounds(self, keys):
        """Converts a key with potentially mixed types of keys (integer and slice)
        into a tuple of ranges and raises an error if any index is out of ``self``'s
        range.

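        Examples
        ========

        An illustrative example:

        >>> from sympy import Matrix
        >>> m = Matrix(2, 3, list(range(6)))
        >>> m.key2bounds((1, slice(None)))
        (1, 2, 0, 3)
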
        See Also
        ========

        key2ij
        """
        from sympy.matrices.common import a2idx as a2idx_ # Remove this line after deprecation of a2idx from matrices.py

        islice, jslice = [isinstance(k, slice) for k in keys]
        if islice:
            if not self.rows:
                rlo = rhi = 0
            else:
                rlo, rhi = keys[0].indices(self.rows)[:2]
        else:
            rlo = a2idx_(keys[0], self.rows)
            rhi = rlo + 1
        if jslice:
            if not self.cols:
                clo = chi = 0
            else:
                clo, chi = keys[1].indices(self.cols)[:2]
        else:
            clo = a2idx_(keys[1], self.cols)
            chi = clo + 1
        return rlo, rhi, clo, chi

    def key2ij(self, key):
        """Converts key into canonical form, converting integers or indexable
        items into valid integers for ``self``'s range or returning slices
        unchanged.

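        Examples
        ========

        An illustrative example:

        >>> from sympy import Matrix
        >>> m = Matrix(2, 3, list(range(6)))
        >>> m.key2ij(4)
        (1, 1)
        >>> m.key2ij((1, 2))
        [1, 2]
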
        See Also
        ========

        key2bounds
        """
        from sympy.matrices.common import a2idx as a2idx_ # Remove this line after deprecation of a2idx from matrices.py

        if is_sequence(key):
            if not len(key) == 2:
                raise TypeError('key must be a sequence of length 2')
            return [a2idx_(i, n) if not isinstance(i, slice) else i
                    for i, n in zip(key, self.shape)]
        elif isinstance(key, slice):
            return key.indices(len(self))[:2]
        else:
            return divmod(a2idx_(key, len(self)), self.cols)

    def LDLdecomposition(self, hermitian=True, dotprodsimp=None):
        """Returns the LDL Decomposition (L, D) of matrix A,
        such that L * D * L.H == A if hermitian flag is True, or
        L * D * L.T == A if hermitian is False.
        This method eliminates the use of square roots.
        Furthermore, it ensures that all the diagonal entries of L are 1.
        A must be a Hermitian positive-definite matrix if hermitian is True,
        or a symmetric matrix otherwise.

        Parameters
        ==========

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.

        Examples
        ========

        >>> from sympy.matrices import Matrix, eye
        >>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
        >>> L, D = A.LDLdecomposition()
        >>> L
        Matrix([
        [   1,   0, 0],
        [ 3/5,   1, 0],
        [-1/5, 1/3, 1]])
        >>> D
        Matrix([
        [25, 0, 0],
        [ 0, 9, 0],
        [ 0, 0, 9]])
        >>> L * D * L.T * A.inv() == eye(A.rows)
        True

        The matrix can have complex entries:

        >>> from sympy import I
        >>> A = Matrix(((9, 3*I), (-3*I, 5)))
        >>> L, D = A.LDLdecomposition()
        >>> L
        Matrix([
        [   1, 0],
        [-I/3, 1]])
        >>> D
        Matrix([
        [9, 0],
        [0, 4]])
        >>> L*D*L.H == A
        True

        See Also
        ========

        cholesky
        LUdecomposition
        QRdecomposition
        """
        if not self.is_square:
            raise NonSquareMatrixError("Matrix must be square.")
        if hermitian and not self.is_hermitian:
            raise ValueError("Matrix must be Hermitian.")
        if not hermitian and not self.is_symmetric():
            raise ValueError("Matrix must be symmetric.")
        return self._LDLdecomposition(hermitian=hermitian, dotprodsimp=dotprodsimp)

    def LDLsolve(self, rhs, dotprodsimp=None):
        """Solves ``Ax = B`` using LDL decomposition,
        for a general square and non-singular matrix.

        For a non-square matrix with rows > cols,
        the least squares solution is returned.

        Parameters
        ==========

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.

        Examples
        ========

        >>> from sympy.matrices import Matrix, eye
        >>> A = eye(2)*2
        >>> B = Matrix([[1, 2], [3, 4]])
        >>> A.LDLsolve(B) == B/2
        True

        See Also
        ========

        LDLdecomposition
        lower_triangular_solve
        upper_triangular_solve
        gauss_jordan_solve
        cholesky_solve
        diagonal_solve
        LUsolve
        QRsolve
        pinv_solve
        """
        hermitian = True
        if self.is_symmetric():
            hermitian = False
            L, D = self.LDLdecomposition(hermitian=hermitian, dotprodsimp=dotprodsimp)
        elif self.is_hermitian:
            L, D = self.LDLdecomposition(hermitian=hermitian, dotprodsimp=dotprodsimp)
        elif self.rows >= self.cols:
            L, D = self.H.multiply(self, dotprodsimp=dotprodsimp) \
                    .LDLdecomposition(hermitian=hermitian, dotprodsimp=dotprodsimp)
            rhs = self.H.multiply(rhs, dotprodsimp=dotprodsimp)
        else:
            raise NotImplementedError('Under-determined system. '
                                      'Try M.gauss_jordan_solve(rhs)')
        Y = L._lower_triangular_solve(rhs, dotprodsimp=dotprodsimp)
        Z = D._diagonal_solve(Y)
        if hermitian:
            return (L.H)._upper_triangular_solve(Z, dotprodsimp=dotprodsimp)
        else:
            return (L.T)._upper_triangular_solve(Z, dotprodsimp=dotprodsimp)

    def lower_triangular_solve(self, rhs, dotprodsimp=None):
        """Solves ``Ax = B``, where A is a lower triangular matrix.

        Parameters
        ==========

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.

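        Examples
        ========

        A minimal forward-substitution example:

        >>> from sympy import Matrix
        >>> A = Matrix([[1, 0], [2, 1]])
        >>> B = Matrix([1, 4])
        >>> A.lower_triangular_solve(B)
        Matrix([
        [1],
        [2]])
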
        See Also
        ========

        upper_triangular_solve
        gauss_jordan_solve
        cholesky_solve
        diagonal_solve
        LDLsolve
        LUsolve
        QRsolve
        pinv_solve
        """

        if not self.is_square:
            raise NonSquareMatrixError("Matrix must be square.")
        if rhs.rows != self.rows:
            raise ShapeError("Matrices size mismatch.")
        if not self.is_lower:
            raise ValueError("Matrix must be lower triangular.")
        return self._lower_triangular_solve(rhs, dotprodsimp=dotprodsimp)

    def LUdecomposition(self,
                        iszerofunc=_iszero,
                        simpfunc=None,
                        rankcheck=False):
        """Returns (L, U, perm) where L is a lower triangular matrix with unit
        diagonal, U is an upper triangular matrix, and perm is a list of row
        swap index pairs. If A is the original matrix, then
        A = (L*U).permuteBkwd(perm), and the row permutation matrix P such
        that P*A = L*U can be computed by P = eye(A.rows).permuteFwd(perm).

        See the documentation for ``LUdecomposition_Simple`` for details about
        the keyword arguments ``rankcheck``, ``iszerofunc``, and ``simpfunc``.

        Examples
        ========

        >>> from sympy import Matrix
        >>> a = Matrix([[4, 3], [6, 3]])
        >>> L, U, _ = a.LUdecomposition()
        >>> L
        Matrix([
        [  1, 0],
        [3/2, 1]])
        >>> U
        Matrix([
        [4,    3],
        [0, -3/2]])

        See Also
        ========

        cholesky
        LDLdecomposition
        QRdecomposition
        LUdecomposition_Simple
        LUdecompositionFF
        LUsolve
        """

        combined, p = self.LUdecomposition_Simple(iszerofunc=iszerofunc,
                                                  simpfunc=simpfunc,
                                                  rankcheck=rankcheck)

        # L is lower triangular ``self.rows x self.rows``
        # U is upper triangular ``self.rows x self.cols``
        # L has unit diagonal. For each column in combined, the subcolumn
        # below the diagonal of combined is shared by L.
        # If L has more columns than combined, then the remaining subcolumns
        # below the diagonal of L are zero.
        # The upper triangular portion of L and combined are equal.
        def entry_L(i, j):
            if i < j:
                # Super diagonal entry
                return self.zero
            elif i == j:
                return self.one
            elif j < combined.cols:
                return combined[i, j]
            # Subdiagonal entry of L with no corresponding
            # entry in combined
            return self.zero

        def entry_U(i, j):
            return self.zero if i > j else combined[i, j]

        L = self._new(combined.rows, combined.rows, entry_L)
        U = self._new(combined.rows, combined.cols, entry_U)

        return L, U, p

    def LUdecomposition_Simple(
        self, iszerofunc=_iszero, simpfunc=None, rankcheck=False,
        dotprodsimp=None):
        r"""Compute the PLU decomposition of the matrix.

        Parameters
        ==========

        rankcheck : bool, optional
            Determines if this function should detect the rank
            deficiency of the matrix and raise a ``ValueError``
            if so.

        iszerofunc : function, optional
            A function which determines if a given expression is zero.

            The function should be a callable that takes a single
            sympy expression and returns ``True``, ``False``, or
            ``None``, with ``None`` meaning that zero-ness is undecided.

            It is internally used by the pivot searching algorithm.
            See the notes section for more information about the
            pivot searching algorithm.

        simpfunc : function or None, optional
            A function that simplifies the input.

            If this is specified as a function, this function should be
            a callable that takes a single sympy expression and returns
            another sympy expression that is algebraically
            equivalent.

            If ``None``, it indicates that the pivot search algorithm
            should not attempt to simplify any candidate pivots.

            It is internally used by the pivot searching algorithm.
            See the notes section for more information about the
            pivot searching algorithm.

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification
            is used during matrix multiplications to control expression
            blowup and thus speed up calculation.

        Returns
        =======

        (lu, row_swaps) : (Matrix, list)
            If the original matrix is an $m \times n$ matrix:

            *lu* is an $m \times n$ matrix, which contains the result of
            the decomposition in a compressed form. See the notes
            section to see how the matrix is compressed.

            *row_swaps* is an $m$-element list where each element is a
            pair of row exchange indices.

            ``A = (L*U).permute_backward(perm)``, and the row
            permutation matrix $P$ from the formula $P A = L U$ can be
            computed by ``P = eye(A.rows).permute_forward(perm)``.

        Raises
        ======

        ValueError
            Raised if ``rankcheck=True`` and the matrix is found to
            be rank deficient during the computation.

        Notes
        =====

        About the PLU decomposition:

        PLU decomposition is a generalization of LU decomposition
        which extends to rank-deficient matrices.

        It can further be generalized to non-square matrices, and this
        is the convention that SymPy uses.

        PLU decomposition is a decomposition of an $m \times n$ matrix
        $A$ in the form of $P A = L U$ where

        * $L$ is an $m \times m$ lower triangular matrix with unit
          diagonal entries.
        * $U$ is an $m \times n$ upper triangular matrix.
        * $P$ is an $m \times m$ permutation matrix.

        So, for a square matrix, the decomposition would look like:

        .. math::
            L = \begin{bmatrix}
            1 & 0 & 0 & \cdots & 0 \\
            L_{1, 0} & 1 & 0 & \cdots & 0 \\
            L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 \\
            \vdots & \vdots & \vdots & \ddots & \vdots \\
            L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & 1
            \end{bmatrix}

        .. math::
            U = \begin{bmatrix}
            U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
            0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
            0 & 0 & U_{2, 2} & \cdots & U_{2, n-1} \\
            \vdots & \vdots & \vdots & \ddots & \vdots \\
            0 & 0 & 0 & \cdots & U_{n-1, n-1}
            \end{bmatrix}

        And for a matrix with more rows than columns,
        the decomposition would look like:

        .. math::
            L = \begin{bmatrix}
            1 & 0 & 0 & \cdots & 0 & 0 & \cdots & 0 \\
            L_{1, 0} & 1 & 0 & \cdots & 0 & 0 & \cdots & 0 \\
            L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 & 0 & \cdots & 0 \\
            \vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \ddots
            & \vdots \\
            L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & 1 & 0
            & \cdots & 0 \\
            L_{n, 0} & L_{n, 1} & L_{n, 2} & \cdots & L_{n, n-1} & 1
            & \cdots & 0 \\
            \vdots & \vdots & \vdots & \ddots & \vdots & \vdots
            & \ddots & \vdots \\
            L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & L_{m-1, n-1}
            & 0 & \cdots & 1 \\
            \end{bmatrix}

        .. math::
            U = \begin{bmatrix}
            U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
            0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
            0 & 0 & U_{2, 2} & \cdots & U_{2, n-1} \\
            \vdots & \vdots & \vdots & \ddots & \vdots \\
            0 & 0 & 0 & \cdots & U_{n-1, n-1} \\
            0 & 0 & 0 & \cdots & 0 \\
            \vdots & \vdots & \vdots & \ddots & \vdots \\
            0 & 0 & 0 & \cdots & 0
            \end{bmatrix}

        Finally, for a matrix with more columns than rows, the
        decomposition would look like:

        .. math::
            L = \begin{bmatrix}
            1 & 0 & 0 & \cdots & 0 \\
            L_{1, 0} & 1 & 0 & \cdots & 0 \\
            L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 \\
            \vdots & \vdots & \vdots & \ddots & \vdots \\
            L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & 1
            \end{bmatrix}

        .. math::
            U = \begin{bmatrix}
            U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, m-1}
            & \cdots & U_{0, n-1} \\
            0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, m-1}
            & \cdots & U_{1, n-1} \\
            0 & 0 & U_{2, 2} & \cdots & U_{2, m-1}
            & \cdots & U_{2, n-1} \\
            \vdots & \vdots & \vdots & \ddots & \vdots
            & \cdots & \vdots \\
            0 & 0 & 0 & \cdots & U_{m-1, m-1}
            & \cdots & U_{m-1, n-1} \\
            \end{bmatrix}

        About the compressed LU storage:

        The results of the decomposition are often stored in compressed
        forms rather than returning $L$ and $U$ matrices individually.

        It may be less intuitive, but it is commonly used by numeric
        libraries because of its efficiency.

        The storage matrix is defined as follows for this specific
        method:

        * The subdiagonal elements of $L$ are stored in the subdiagonal
          portion of $LU$, that is $LU_{i, j} = L_{i, j}$ whenever
          $i > j$.
        * The elements on the diagonal of $L$ are all 1, and are not
          explicitly stored.
        * $U$ is stored in the upper triangular portion of $LU$, that is
          $LU_{i, j} = U_{i, j}$ whenever $i \le j$.
        * For the case $m > n$, the right side of the $L$ matrix is
          trivial to store.
        * For the case $m < n$, the bottom side of the $U$ matrix is
          trivial to store.

        So, for a square matrix, the compressed output matrix would be:

        .. math::
            LU = \begin{bmatrix}
            U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
            L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
            L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, n-1} \\
            \vdots & \vdots & \vdots & \ddots & \vdots \\
            L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & U_{n-1, n-1}
            \end{bmatrix}

        For a matrix with more rows than columns, the compressed
        output matrix would be:

        .. math::
            LU = \begin{bmatrix}
            U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
            L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
            L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, n-1} \\
            \vdots & \vdots & \vdots & \ddots & \vdots \\
            L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots
            & U_{n-1, n-1} \\
            \vdots & \vdots & \vdots & \ddots & \vdots \\
            L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots
            & L_{m-1, n-1} \\
            \end{bmatrix}

        For a matrix with more columns than rows, the compressed
        output matrix would be:

        .. math::
            LU = \begin{bmatrix}
            U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, m-1}
            & \cdots & U_{0, n-1} \\
            L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, m-1}
            & \cdots & U_{1, n-1} \\
            L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, m-1}
            & \cdots & U_{2, n-1} \\
            \vdots & \vdots & \vdots & \ddots & \vdots
            & \cdots & \vdots \\
            L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & U_{m-1, m-1}
            & \cdots & U_{m-1, n-1} \\
            \end{bmatrix}

        About the pivot searching algorithm:

        When a matrix contains symbolic entries, the pivot search algorithm
        differs from the case where every entry can be categorized as zero or
        nonzero.
        The algorithm searches column by column through the submatrix whose
        top left entry coincides with the pivot position.
        If it exists, the pivot is the first entry in the current search
        column that iszerofunc guarantees is nonzero.
        If no such candidate exists, then each candidate pivot is simplified
        if simpfunc is not None.
        The search is repeated, with the difference that a candidate may be
        the pivot if ``iszerofunc()`` cannot guarantee that it is nonzero.
        In the second search the pivot is the first candidate that
        iszerofunc can guarantee is nonzero.
        If no such candidate exists, then the pivot is the first candidate
        for which iszerofunc returns None.
        If no such candidate exists, then the search is repeated in the next
        column to the right.
        The pivot search algorithm differs from the one in ``rref()``, which
        relies on ``_find_reasonable_pivot()``.
        Future versions of ``LUdecomposition_Simple()`` may use
        ``_find_reasonable_pivot()``.

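        Examples
        ========

        A small example of the compressed storage described above:

        >>> from sympy import Matrix
        >>> A = Matrix([[4, 3], [6, 3]])
        >>> lu, row_swaps = A.LUdecomposition_Simple()
        >>> lu
        Matrix([
        [  4,    3],
        [3/2, -3/2]])
        >>> row_swaps
        []
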
        See Also
        ========

        LUdecomposition
        LUdecompositionFF
        LUsolve
        """

        if rankcheck:
            # https://github.com/sympy/sympy/issues/9796
            pass

        if self.rows == 0 or self.cols == 0:
            # Define LU decomposition of a matrix with no entries as a matrix
            # of the same dimensions with all zero entries.
            return self.zeros(self.rows, self.cols), []

        dps = _dotprodsimp if dotprodsimp else lambda e: e
        lu = self.as_mutable()
        row_swaps = []

        pivot_col = 0
        for pivot_row in range(0, lu.rows - 1):
            # Search for pivot. Prefer an entry that iszerofunc determines
            # is nonzero over an entry whose zero-ness iszerofunc cannot
            # decide.
            # XXX ``_find_reasonable_pivot`` uses slow zero testing. Blocked by bug #10279.
            # Future versions of LUdecomposition_Simple can pass iszerofunc and
            # simpfunc to _find_reasonable_pivot().
            # In pass 3 of _find_reasonable_pivot(), the predicate in
            # ``if x.equals(S.Zero):`` calls sympy.simplify(), and not the
            # simplification function passed in via the keyword argument simpfunc.

            iszeropivot = True
            while pivot_col != self.cols and iszeropivot:
                sub_col = (lu[r, pivot_col] for r in range(pivot_row, self.rows))
                pivot_row_offset, pivot_value, is_assumed_non_zero, ind_simplified_pairs =\
                    _find_reasonable_pivot_naive(sub_col, iszerofunc, simpfunc)
                iszeropivot = pivot_value is None
                if iszeropivot:
                    # All candidate pivots in this column are zero.
                    # Proceed to next column.
                    pivot_col += 1

            if rankcheck and pivot_col != pivot_row:
                # All entries including and below the pivot position are
                # zero, which indicates that the rank of the matrix is
                # strictly less than min(num rows, num cols)
                # Mimic behavior of previous implementation, by throwing a
                # ValueError.
                raise ValueError("Rank of matrix is strictly less than"
                                 " number of rows or columns."
                                 " Pass keyword argument"
                                 " rankcheck=False to compute"
                                 " the LU decomposition of this matrix.")

            candidate_pivot_row = None if pivot_row_offset is None else pivot_row + pivot_row_offset

            if candidate_pivot_row is None and iszeropivot:
                # If candidate_pivot_row is None and iszeropivot is True
                # after pivot search has completed, then the submatrix
                # below and to the right of (pivot_row, pivot_col) is
                # all zeros, indicating that Gaussian elimination is
                # complete.
                return lu, row_swaps

            # Update entries simplified during pivot search.
            for offset, val in ind_simplified_pairs:
                lu[pivot_row + offset, pivot_col] = val

            if pivot_row != candidate_pivot_row:
                # Row swap book keeping:
                # Record which rows were swapped.
                # Update stored portion of L factor by multiplying L on the
                # left and right with the current permutation.
                # Swap rows of U.
                row_swaps.append([pivot_row, candidate_pivot_row])

                # Update L.
                lu[pivot_row, 0:pivot_row], lu[candidate_pivot_row, 0:pivot_row] = \
                    lu[candidate_pivot_row, 0:pivot_row], lu[pivot_row, 0:pivot_row]

                # Swap pivot row of U with candidate pivot row.
                lu[pivot_row, pivot_col:lu.cols], lu[candidate_pivot_row, pivot_col:lu.cols] = \
                    lu[candidate_pivot_row, pivot_col:lu.cols], lu[pivot_row, pivot_col:lu.cols]

            # Introduce zeros below the pivot by adding a multiple of the
            # pivot row to a row under it, and store the result in the
            # row under it.
            # Only entries in the target row whose index is greater than
            # start_col may be nonzero.
            start_col = pivot_col + 1
            for row in range(pivot_row + 1, lu.rows):
                # Store factors of L in the subcolumn below
                # (pivot_row, pivot_row).
                lu[row, pivot_row] = \
                    dps(lu[row, pivot_col]/lu[pivot_row, pivot_col])

                # Form the linear combination of the pivot row and the current
                # row below the pivot row that zeros the entries below the pivot.
                # Employing slicing instead of a loop here raises
                # NotImplementedError: Cannot add Zero to MutableSparseMatrix
                # in sympy/matrices/tests/test_sparse.py.
                for c in range(start_col, lu.cols):
                    lu[row, c] = dps(lu[row, c] - lu[row, pivot_row]*lu[pivot_row, c])

            if pivot_row != pivot_col:
                # matrix rank < min(num rows, num cols),
                # so factors of L are not stored directly below the pivot.
                # These entries are zero by construction, so don't bother
                # computing them.
                for row in range(pivot_row + 1, lu.rows):
                    lu[row, pivot_col] = self.zero

            pivot_col += 1
            if pivot_col == lu.cols:
                # All candidate pivots are zero implies that Gaussian
                # elimination is complete.
                return lu, row_swaps

        if rankcheck:
            if iszerofunc(
            lu[Min(lu.rows, lu.cols) - 1, Min(lu.rows, lu.cols) - 1]):
                raise ValueError("Rank of matrix is strictly less than"
                                 " number of rows or columns."
                                 " Pass keyword argument"
                                 " rankcheck=False to compute"
                                 " the LU decomposition of this matrix.")

        return lu, row_swaps

    def LUdecompositionFF(self):
        """Compute a fraction-free LU decomposition.

        Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
        If the elements of the matrix belong to some integral domain I, then all
        elements of L, D and U are guaranteed to belong to I.

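        Examples
        ========

        A minimal check of the defining identity:

        >>> from sympy import Matrix
        >>> A = Matrix([[4, 3], [6, 3]])
        >>> P, L, D, U = A.LUdecompositionFF()
        >>> P*A == L*D.inv()*U
        True
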
        References
        ==========

        .. [1] W. Zhou & D.J. Jeffrey, "Fraction-free matrix factors: new
           forms for LU and QR factors". Frontiers in Computer Science in
           China, Vol 2, no. 1, pp. 67-80, 2008.

        See Also
        ========

        LUdecomposition
        LUdecomposition_Simple
        LUsolve
        """
        from sympy.matrices import SparseMatrix
        zeros = SparseMatrix.zeros
        eye = SparseMatrix.eye

        n, m = self.rows, self.cols
        U, L, P = self.as_mutable(), eye(n), eye(n)
        DD = zeros(n, n)
        oldpivot = 1

        for k in range(n - 1):
            if U[k, k] == 0:
                for kpivot in range(k + 1, n):
                    if U[kpivot, k]:
                        break
                else:
                    raise ValueError("Matrix is not full rank")
                U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:]
                L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k]
                P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :]
            L[k, k] = Ukk = U[k, k]
            DD[k, k] = oldpivot * Ukk
            for i in range(k + 1, n):
                L[i, k] = Uik = U[i, k]
                for j in range(k + 1, m):
                    U[i, j] = (Ukk * U[i, j] - U[k, j] * Uik) / oldpivot
                U[i, k] = 0
            oldpivot = Ukk
        DD[n - 1, n - 1] = oldpivot
        return P, L, DD, U

    def LUsolve(self, rhs, iszerofunc=_iszero, dotprodsimp=None):
        """Solve the linear system ``Ax = rhs`` for ``x`` where ``A = self``.

        This is for symbolic matrices; for real or complex ones use
        mpmath.lu_solve or mpmath.qr_solve.

        Parameters
        ==========

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.

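        Examples
        ========

        A minimal example:

        >>> from sympy import Matrix
        >>> A = Matrix([[2, 1], [1, 3]])
        >>> b = Matrix([3, 4])
        >>> A.LUsolve(b)
        Matrix([
        [1],
        [1]])
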
        See Also
        ========

        lower_triangular_solve
        upper_triangular_solve
        gauss_jordan_solve
        cholesky_solve
        diagonal_solve
        LDLsolve
        QRsolve
        pinv_solve
        LUdecomposition
        """
        if rhs.rows != self.rows:
            raise ShapeError(
                "``self`` and ``rhs`` must have the same number of rows.")

        m = self.rows
        n = self.cols
        if m < n:
            raise NotImplementedError("Underdetermined systems not supported.")

        try:
            A, perm = self.LUdecomposition_Simple(
                iszerofunc=_iszero, rankcheck=True, dotprodsimp=dotprodsimp)
        except ValueError:
            raise NotImplementedError("Underdetermined systems not supported.")

        dps = _dotprodsimp if dotprodsimp else lambda e: e
        b = rhs.permute_rows(perm).as_mutable()
        # forward substitution, all diag entries are scaled to 1
        for i in range(m):
            for j in range(min(i, n)):
                scale = A[i, j]
                b.zip_row_op(i, j, lambda x, y: dps(x - y * scale))
        # consistency check for overdetermined systems
        if m > n:
            for i in range(n, m):
                for j in range(b.cols):
                    if not iszerofunc(b[i, j]):
                        raise ValueError("The system is inconsistent.")
            b = b[0:n, :]   # truncate zero rows if consistent
        # backward substitution
        for i in range(n - 1, -1, -1):
            for j in range(i + 1, n):
                scale = A[i, j]
                b.zip_row_op(i, j, lambda x, y: dps(x - y * scale))
            scale = A[i, i]
            b.row_op(i, lambda x, _: dps(x / scale))
        return rhs.__class__(b)

    def normalized(self, iszerofunc=_iszero):
        """Return the normalized version of ``self``.

        Parameters
        ==========

        iszerofunc : Function, optional
            A function to determine whether ``self`` is a zero vector.
            The default ``_iszero`` tests to see if each element is
            exactly zero.

        Returns
        =======

        Matrix
            Normalized vector form of ``self``.
            It has length 1 (i.e. it is a unit vector), except that a zero
            vector is returned for a vector with norm 0.

        Raises
        ======

        ShapeError
            If the matrix is not in a vector form.

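        Examples
        ========

        A minimal example:

        >>> from sympy import Matrix
        >>> Matrix([3, 4]).normalized()
        Matrix([
        [3/5],
        [4/5]])
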
        See Also
        ========

        norm
        """
        if self.rows != 1 and self.cols != 1:
            raise ShapeError("A Matrix must be a vector to normalize.")
        norm = self.norm()
        if iszerofunc(norm):
            out = self.zeros(self.rows, self.cols)
        else:
            out = self.applyfunc(lambda i: i / norm)
        return out

    def norm(self, ord=None):
        """Return the Norm of a Matrix or Vector.
        In the simplest case this is the geometric size of the vector
        Other norms can be specified by the ord parameter


        =====  ============================  ==========================
        ord    norm for matrices             norm for vectors
        =====  ============================  ==========================
        None   Frobenius norm                2-norm
        'fro'  Frobenius norm                - does not exist
        inf    maximum row sum               max(abs(x))
        -inf   --                            min(abs(x))
        1      maximum column sum            as below
        -1     --                            as below
        2      2-norm (largest sing. value)  as below
        -2     smallest singular value       as below
        other  - does not exist              sum(abs(x)**ord)**(1./ord)
        =====  ============================  ==========================

        Examples
        ========

        >>> from sympy import Matrix, Symbol, trigsimp, cos, sin, oo
        >>> x = Symbol('x', real=True)
        >>> v = Matrix([cos(x), sin(x)])
        >>> trigsimp( v.norm() )
        1
        >>> v.norm(10)
        (sin(x)**10 + cos(x)**10)**(1/10)
        >>> A = Matrix([[1, 1], [1, 1]])
        >>> A.norm(1) # maximum sum of absolute values of A is 2
        2
        >>> A.norm(2) # Spectral norm (max of |Ax|/|x| under 2-vector-norm)
        2
        >>> A.norm(-2) # Inverse spectral norm (smallest singular value)
        0
        >>> A.norm() # Frobenius Norm
        2
        >>> A.norm(oo) # Infinity Norm
        2
        >>> Matrix([1, -2]).norm(oo)
        2
        >>> Matrix([-1, 2]).norm(-oo)
        1

        See Also
        ========

        normalized
        """
        # Row or Column Vector Norms
        vals = list(self.values()) or [0]
        if self.rows == 1 or self.cols == 1:
            if ord == 2 or ord is None:  # Common case sqrt(<x, x>)
                return sqrt(Add(*(abs(i) ** 2 for i in vals)))

            elif ord == 1:  # sum(abs(x))
                return Add(*(abs(i) for i in vals))

            elif ord is S.Infinity:  # max(abs(x))
                return Max(*[abs(i) for i in vals])

            elif ord is S.NegativeInfinity:  # min(abs(x))
                return Min(*[abs(i) for i in vals])

            # Otherwise generalize the 2-norm, Sum(x_i**ord)**(1/ord)
            # Note that while useful this is not mathematically a norm
            try:
                return Pow(Add(*(abs(i) ** ord for i in vals)), S.One / ord)
            except (NotImplementedError, TypeError):
                raise ValueError("Expected order to be Number, Symbol, oo")

        # Matrix Norms
        else:
            if ord == 1:  # Maximum column sum
                m = self.applyfunc(abs)
                return Max(*[sum(m.col(i)) for i in range(m.cols)])

            elif ord == 2:  # Spectral Norm
                # Maximum singular value
                return Max(*self.singular_values())

            elif ord == -2:
                # Minimum singular value
                return Min(*self.singular_values())

            elif ord is S.Infinity:   # Infinity Norm - Maximum row sum
                m = self.applyfunc(abs)
                return Max(*[sum(m.row(i)) for i in range(m.rows)])

            elif (ord is None or
                    isinstance(ord, str) and
                    ord.lower() in ('f', 'fro', 'frobenius', 'vector')):
                # Reshape as vector and send back to norm function
                return self.vec().norm(ord=2)

            else:
                raise NotImplementedError("Matrix Norms under development")

    def pinv_solve(self, B, arbitrary_matrix=None):
        """Solve ``Ax = B`` using the Moore-Penrose pseudoinverse.

        There may be zero, one, or infinite solutions.  If one solution
        exists, it will be returned.  If infinite solutions exist, one will
        be returned based on the value of arbitrary_matrix.  If no solutions
        exist, the least-squares solution is returned.

        Parameters
        ==========

        B : Matrix
            The right hand side of the equation to be solved for.  Must have
            the same number of rows as matrix A.
        arbitrary_matrix : Matrix
            If the system is underdetermined (e.g. A has more columns than
            rows), infinite solutions are possible, in terms of an arbitrary
            matrix.  This parameter may be set to a specific matrix to use
            for that purpose; if so, it must be the same shape as x, with as
            many rows as matrix A has columns, and as many columns as matrix
            B.  If left as None, an appropriate matrix containing dummy
            symbols in the form of ``wn_m`` will be used, with n and m being
            row and column position of each symbol.

        Returns
        =======

        x : Matrix
            The matrix that will satisfy ``Ax = B``.  Will have as many rows as
            matrix A has columns, and as many columns as matrix B.

        Examples
        ========

        >>> from sympy import Matrix
        >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
        >>> B = Matrix([7, 8])
        >>> A.pinv_solve(B)
        Matrix([
        [ _w0_0/6 - _w1_0/3 + _w2_0/6 - 55/18],
        [-_w0_0/3 + 2*_w1_0/3 - _w2_0/3 + 1/9],
        [ _w0_0/6 - _w1_0/3 + _w2_0/6 + 59/18]])
        >>> A.pinv_solve(B, arbitrary_matrix=Matrix([0, 0, 0]))
        Matrix([
        [-55/18],
        [   1/9],
        [ 59/18]])

        See Also
        ========

        lower_triangular_solve
        upper_triangular_solve
        gauss_jordan_solve
        cholesky_solve
        diagonal_solve
        LDLsolve
        LUsolve
        QRsolve
        pinv

        Notes
        =====

        This may return either exact solutions or least squares solutions.
        To determine which, check ``A * A.pinv() * B == B``.  It will be
        True if exact solutions exist, and False if only a least-squares
        solution exists.  Be aware that the left hand side of that equation
        may need to be simplified to correctly compare to the right hand
        side.

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse#Obtaining_all_solutions_of_a_linear_system

        """
        from sympy.matrices import eye
        A = self
        A_pinv = self.pinv()
        if arbitrary_matrix is None:
            rows, cols = A.cols, B.cols
            w = symbols('w:{0}_:{1}'.format(rows, cols), cls=Dummy)
            arbitrary_matrix = self.__class__(cols, rows, w).T
        return A_pinv * B + (eye(A.cols) - A_pinv * A) * arbitrary_matrix

    def _eval_pinv_full_rank(self):
        """Subroutine for full row or column rank matrices.

        For full row rank matrices, inverse of ``A * A.H`` Exists.
        For full column rank matrices, inverse of ``A.H * A`` Exists.

        This routine can apply for both cases by checking the shape
        and have small decision.
        """
        if self.is_zero_matrix:
            return self.H

        if self.rows >= self.cols:
            return (self.H * self).inv() * self.H
        else:
            return self.H * (self * self.H).inv()

    def _eval_pinv_rank_decomposition(self):
        """Subroutine for rank decomposition

        With rank decompositions, `A` can be decomposed into two full-
        rank matrices, and each matrix can take pseudoinverse
        individually.
        """
        if self.is_zero_matrix:
            return self.H

        B, C = self.rank_decomposition()

        Bp = B._eval_pinv_full_rank()
        Cp = C._eval_pinv_full_rank()

        return Cp * Bp

    def _eval_pinv_diagonalization(self):
        """Subroutine using diagonalization

        This routine can sometimes fail if SymPy's eigenvalue
        computation is not reliable.
        """
        if self.is_zero_matrix:
            return self.H

        A = self
        AH = self.H

        try:
            if self.rows >= self.cols:
                P, D = (AH * A).diagonalize(normalize=True)
                D_pinv = D.applyfunc(lambda x: 0 if _iszero(x) else 1 / x)
                return P * D_pinv * P.H * AH
            else:
                P, D = (A * AH).diagonalize(normalize=True)
                D_pinv = D.applyfunc(lambda x: 0 if _iszero(x) else 1 / x)
                return AH * P * D_pinv * P.H
        except MatrixError:
            raise NotImplementedError(
                'pinv for rank-deficient matrices where '
                'diagonalization of A.H*A fails is not supported yet.')


    def pinv(self, method='RD'):
        """Calculate the Moore-Penrose pseudoinverse of the matrix.

        The Moore-Penrose pseudoinverse exists and is unique for any matrix.
        If the matrix is invertible, the pseudoinverse is the same as the
        inverse.

        Parameters
        ==========

        method : string, optional
            Specifies the method for computing the pseudoinverse.

            If ``'RD'``, Rank-Decomposition will be used.

            If ``'ED'``, Diagonalization will be used.

        Examples
        ========

        Computing the pseudoinverse by rank decomposition:

        >>> from sympy import Matrix
        >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
        >>> A.pinv()
        Matrix([
        [-17/18,  4/9],
        [  -1/9,  1/9],
        [ 13/18, -2/9]])
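
        The defining Moore-Penrose conditions hold; for example:

        >>> A * A.pinv() * A == A
        True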

        Computing the pseudoinverse by diagonalization:

        >>> B = A.pinv(method='ED')
        >>> B.simplify()
        >>> B
        Matrix([
        [-17/18,  4/9],
        [  -1/9,  1/9],
        [ 13/18, -2/9]])

        See Also
        ========

        inv
        pinv_solve

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse

        """
        # Trivial case: pseudoinverse of all-zero matrix is its transpose.
        if self.is_zero_matrix:
            return self.H

        if method == 'RD':
            return self._eval_pinv_rank_decomposition()
        elif method == 'ED':
            return self._eval_pinv_diagonalization()
        else:
            raise ValueError("pinv method must be 'RD' or 'ED', got %s"
                % repr(method))

    def print_nonzero(self, symb="X"):
        """Shows location of non-zero entries for fast shape lookup.

        Examples
        ========

        >>> from sympy.matrices import Matrix, eye
        >>> m = Matrix(2, 3, lambda i, j: i*3+j)
        >>> m
        Matrix([
        [0, 1, 2],
        [3, 4, 5]])
        >>> m.print_nonzero()
        [ XX]
        [XXX]
        >>> m = eye(4)
        >>> m.print_nonzero("x")
        [x   ]
        [ x  ]
        [  x ]
        [   x]

        """
        s = []
        for i in range(self.rows):
            line = []
            for j in range(self.cols):
                if self[i, j] == 0:
                    line.append(" ")
                else:
                    line.append(str(symb))
            s.append("[%s]" % ''.join(line))
        print('\n'.join(s))

    def project(self, v):
        """Return the projection of ``self`` onto the line containing ``v``.

        Examples
        ========

        >>> from sympy import Matrix, S, sqrt
        >>> V = Matrix([sqrt(3)/2, S.Half])
        >>> x = Matrix([[1, 0]])
        >>> V.project(x)
        Matrix([[sqrt(3)/2, 0]])
        >>> V.project(-x)
        Matrix([[sqrt(3)/2, 0]])
        """
        return v * (self.dot(v) / v.dot(v))

    def QRdecomposition(self):
        """Return Q, R where A = Q*R, Q is orthogonal and R is upper triangular.

        Examples
        ========

        This is the example from Wikipedia:

        >>> from sympy import Matrix
        >>> A = Matrix([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])
        >>> Q, R = A.QRdecomposition()
        >>> Q
        Matrix([
        [ 6/7, -69/175, -58/175],
        [ 3/7, 158/175,   6/175],
        [-2/7,    6/35,  -33/35]])
        >>> R
        Matrix([
        [14,  21, -14],
        [ 0, 175, -70],
        [ 0,   0,  35]])
        >>> A == Q*R
        True
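
        ``Q`` is orthogonal here, so its transpose is its inverse:

        >>> from sympy import eye
        >>> Q * Q.T == eye(3)
        True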

        QR factorization of an identity matrix:

        >>> A = Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        >>> Q, R = A.QRdecomposition()
        >>> Q
        Matrix([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]])
        >>> R
        Matrix([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]])

        See Also
        ========

        cholesky
        LDLdecomposition
        LUdecomposition
        QRsolve
        """
        cls = self.__class__
        mat = self.as_mutable()

        n = mat.rows
        m = mat.cols
        ranked = list()

        # Pad with additional rows to make wide matrices square
        # nOrig keeps track of original size so zeros can be trimmed from Q
        if n < m:
            nOrig = n
            n = m
            mat = mat.col_join(mat.zeros(n - nOrig, m))
        else:
            nOrig = n

        Q, R = mat.zeros(n, m), mat.zeros(m)
        for j in range(m):  # for each column vector
            tmp = mat[:, j]  # take original v
            for i in range(j):
                # subtract the projection of the column onto each
                # previously computed basis vector of Q
                R[i, j] = Q[:, i].dot(mat[:, j], hermitian=True)
                tmp -= Q[:, i] * R[i, j]
                tmp = tmp.expand()  # expand() is not in-place
            # normalize it
            R[j, j] = tmp.norm()
            if not R[j, j].is_zero:
                ranked.append(j)
                Q[:, j] = tmp / R[j, j]

        if len(ranked) != 0:
            return (
                cls(Q.extract(range(nOrig), ranked)),
                cls(R.extract(ranked, range(R.cols))))
        else:
            # Trivial case handling for zero-rank matrix:
            # force Q to contain standard basis vectors.
            for i in range(Min(nOrig, m)):
                Q[i, i] = 1
            return (
                cls(Q.extract(range(nOrig), range(Min(nOrig, m)))),
                cls(R.extract(range(Min(nOrig, m)), range(R.cols))))

    def QRsolve(self, b):
        """Solve the linear system ``Ax = b``.

        ``self`` is the matrix ``A``, the method argument is the vector
        ``b``.  The method returns the solution vector ``x``.  If ``b`` is a
        matrix, the system is solved for each column of ``b`` and the
        return value is a matrix of the same shape as ``b``.

        This method is slower (approximately by a factor of 2) but more
        stable for floating-point arithmetic than the LUsolve method.
        However, LUsolve usually uses exact arithmetic, in which case
        QRsolve offers no advantage.

        This method is mainly intended for educational purposes and for
        symbolic matrices; for real (or complex) matrices use
        ``mpmath.qr_solve``.
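
        Examples
        ========

        A small illustrative system (chosen so the result is exact):

        >>> from sympy import Matrix
        >>> A = Matrix([[2, 1], [1, 3]])
        >>> b = Matrix([3, 4])
        >>> A.QRsolve(b)
        Matrix([
        [1],
        [1]])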

        See Also
        ========

        lower_triangular_solve
        upper_triangular_solve
        gauss_jordan_solve
        cholesky_solve
        diagonal_solve
        LDLsolve
        LUsolve
        pinv_solve
        QRdecomposition
        """

        Q, R = self.as_mutable().QRdecomposition()
        y = Q.T * b

        # back substitution to solve R*x = y:
        # We build up the result "backwards" in the vector 'x' and reverse it
        # only in the end.
        x = []
        n = R.rows
        for j in range(n - 1, -1, -1):
            tmp = y[j, :]
            for k in range(j + 1, n):
                tmp -= R[j, k] * x[n - 1 - k]
            x.append(tmp / R[j, j])
        return self._new([row._mat for row in reversed(x)])

    def rank_decomposition(self, iszerofunc=_iszero, simplify=False):
        r"""Returns a pair of matrices (`C`, `F`) with matching rank
        such that `A = C F`.

        Parameters
        ==========

        iszerofunc : Function, optional
            A function used for detecting whether an element can
            act as a pivot.  ``lambda x: x.is_zero`` is used by default.

        simplify : Bool or Function, optional
            A function used to simplify elements when looking for a
            pivot.  By default no simplification is used (``False``);
            setting it to ``True`` uses SymPy's ``simplify``.

        Returns
        =======

        (C, F) : Matrices
            `C` and `F` are full-rank matrices whose rank equals that of
            `A`, and whose product gives `A`.

            See Notes for additional mathematical details.

        Examples
        ========

        >>> from sympy.matrices import Matrix
        >>> A = Matrix([
        ...     [1, 3, 1, 4],
        ...     [2, 7, 3, 9],
        ...     [1, 5, 3, 1],
        ...     [1, 2, 0, 8]
        ... ])
        >>> C, F = A.rank_decomposition()
        >>> C
        Matrix([
        [1, 3, 4],
        [2, 7, 9],
        [1, 5, 1],
        [1, 2, 8]])
        >>> F
        Matrix([
        [1, 0, -2, 0],
        [0, 1,  1, 0],
        [0, 0,  0, 1]])
        >>> C * F == A
        True

        Notes
        =====

        Obtaining `F`, the RREF of `A`, is equivalent to creating a
        product

        .. math::
            E_n E_{n-1} \cdots E_1 A = F

        where `E_n, E_{n-1}, ..., E_1` are the elimination or permutation
        matrices corresponding to each row-reduction step.

        The inverse of the same product of elimination matrices gives
        `C`:

        .. math::
            C = (E_n E_{n-1} \cdots E_1)^{-1}

        It is not necessary, however, to actually compute the inverse:
        the columns of `C` are those from the original matrix with the
        same column indices as the indices of the pivot columns of `F`.
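
        For the matrix ``A`` above, for instance, the pivot columns are
        the first, second, and fourth:

        >>> A.rref(pivots=True)[1]
        (0, 1, 3)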

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Rank_factorization

        .. [2] Piziak, R.; Odell, P. L. (1 June 1999).
            "Full Rank Factorization of Matrices".
            Mathematics Magazine. 72 (3): 193. doi:10.2307/2690882

        See Also
        ========

        rref
        """
        (F, pivot_cols) = self.rref(
            simplify=simplify, iszerofunc=iszerofunc, pivots=True)
        rank = len(pivot_cols)

        C = self.extract(range(self.rows), pivot_cols)
        F = F[:rank, :]

        return (C, F)

    def solve_least_squares(self, rhs, method='CH'):
        """Return the least-square fit to the data.

        Parameters
        ==========

        rhs : Matrix
            Vector representing the right hand side of the linear equation.

        method : string or boolean, optional
            If set to ``'CH'``, ``cholesky_solve`` routine will be used.

            If set to ``'LDL'``, ``LDLsolve`` routine will be used.

            If set to ``'QR'``, ``QRsolve`` routine will be used.

            If set to ``'PINV'``, ``pinv_solve`` routine will be used.

            Otherwise, the conjugate transpose of ``self`` (``self.H``) is
            used to form the normal equations,
            ``self.H * self * x = self.H * rhs``, which are passed to
            ``solve`` along with the hint defined by ``method``.

        Returns
        =======

        solutions : Matrix
            Vector representing the solution.

        Examples
        ========

        >>> from sympy.matrices import Matrix, ones
        >>> A = Matrix([1, 2, 3])
        >>> B = Matrix([2, 3, 4])
        >>> S = Matrix(A.row_join(B))
        >>> S
        Matrix([
        [1, 2],
        [2, 3],
        [3, 4]])

        If each row of S represents the coefficients of Ax + By
        and [x, y] = [2, 3], then S*xy is:

        >>> r = S*Matrix([2, 3]); r
        Matrix([
        [ 8],
        [13],
        [18]])

        But let's add 1 to the middle value and then solve for the
        least-squares value of xy:

        >>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
        Matrix([
        [ 5/3],
        [10/3]])

        The error is given by S*xy - r:

        >>> S*xy - r
        Matrix([
        [1/3],
        [1/3],
        [1/3]])
        >>> _.norm().n(2)
        0.58

        If a different xy is used, the norm will be higher:

        >>> xy += ones(2, 1)/10
        >>> (S*xy - r).norm().n(2)
        1.5

        """
        if method == 'CH':
            return self.cholesky_solve(rhs)
        elif method == 'QR':
            return self.QRsolve(rhs)
        elif method == 'LDL':
            return self.LDLsolve(rhs)
        elif method == 'PINV':
            return self.pinv_solve(rhs)
        else:
            t = self.H
            return (t * self).solve(t * rhs, method=method)

    def solve(self, rhs, method='GJ', dotprodsimp=None):
        """Solves linear equation where the unique solution exists.

        Parameters
        ==========

        rhs : Matrix
            Vector representing the right hand side of the linear equation.

        method : string, optional
           If set to ``'GJ'``, Gauss-Jordan elimination will be used, which
           is implemented in the routine ``gauss_jordan_solve``.

           If set to ``'LU'``, ``LUsolve`` routine will be used.

           If set to ``'QR'``, ``QRsolve`` routine will be used.

           If set to ``'PINV'``, ``pinv_solve`` routine will be used.

           It also supports the methods available for special linear systems:

           For positive definite systems:

           If set to ``'CH'``, ``cholesky_solve`` routine will be used.

           If set to ``'LDL'``, ``LDLsolve`` routine will be used.

           To compute the solution via the inverse using a different
           method, pass one of the methods defined in the ``.inv()``
           docstring.

        dotprodsimp : bool, optional
            Specifies whether intermediate term algebraic simplification is used
            during matrix multiplications to control expression blowup and thus
            speed up calculation.

        Returns
        =======

        solutions : Matrix
            Vector representing the solution.

        Raises
        ======

        ValueError
            If there is not a unique solution then a ``ValueError`` will be
            raised.

            If ``self`` is not square, a ``ValueError`` will be raised and
            a different routine for solving the system will be suggested.
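
        Examples
        ========

        A small invertible system, using the default Gauss-Jordan method:

        >>> from sympy import Matrix
        >>> A = Matrix([[1, 2], [3, 5]])
        >>> b = Matrix([1, 2])
        >>> A.solve(b)
        Matrix([
        [-1],
        [ 1]])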
        """

        if method == 'GJ':
            try:
                soln, param = self.gauss_jordan_solve(rhs, dotprodsimp=dotprodsimp)
                if param:
                    raise NonInvertibleMatrixError("Matrix det == 0; not invertible. "
                    "Try ``self.gauss_jordan_solve(rhs)`` to obtain a parametric solution.")
            except ValueError:
                raise NonInvertibleMatrixError("Matrix det == 0; not invertible.")
            return soln
        elif method == 'LU':
            return self.LUsolve(rhs, dotprodsimp=dotprodsimp)
        elif method == 'CH':
            return self.cholesky_solve(rhs, dotprodsimp=dotprodsimp)
        elif method == 'QR':
            return self.QRsolve(rhs)
        elif method == 'LDL':
            return self.LDLsolve(rhs, dotprodsimp=dotprodsimp)
        elif method == 'PINV':
            return self.pinv_solve(rhs)
        else:
            return self.inv(method=method, dotprodsimp=dotprodsimp) \
                    .multiply(rhs, dotprodsimp=dotprodsimp)

    def table(self, printer, rowstart='[', rowend=']', rowsep='\n',
              colsep=', ', align='right'):
        r"""
        String form of Matrix as a table.

        ``printer`` is the printer to use on the elements (generally
        something like StrPrinter())

        ``rowstart`` is the string used to start each row (by default '[').

        ``rowend`` is the string used to end each row (by default ']').

        ``rowsep`` is the string used to separate rows (by default a newline).

        ``colsep`` is the string used to separate columns (by default ', ').

        ``align`` defines how the elements are aligned. Must be one of 'left',
        'right', or 'center'.  You can also use '<', '>', and '^' to mean the
        same thing, respectively.

        This is used by the string printer for Matrix.

        Examples
        ========

        >>> from sympy import Matrix
        >>> from sympy.printing.str import StrPrinter
        >>> M = Matrix([[1, 2], [-33, 4]])
        >>> printer = StrPrinter()
        >>> M.table(printer)
        '[  1, 2]\n[-33, 4]'
        >>> print(M.table(printer))
        [  1, 2]
        [-33, 4]
        >>> print(M.table(printer, rowsep=',\n'))
        [  1, 2],
        [-33, 4]
        >>> print('[%s]' % M.table(printer, rowsep=',\n'))
        [[  1, 2],
        [-33, 4]]
        >>> print(M.table(printer, colsep=' '))
        [  1 2]
        [-33 4]
        >>> print(M.table(printer, align='center'))
        [ 1 , 2]
        [-33, 4]
        >>> print(M.table(printer, rowstart='{', rowend='}'))
        {  1, 2}
        {-33, 4}
        """
        # Handle zero dimensions:
        if self.rows == 0 or self.cols == 0:
            return '[]'
        # Build table of string representations of the elements
        res = []
        # Track per-column max lengths for pretty alignment
        maxlen = [0] * self.cols
        for i in range(self.rows):
            res.append([])
            for j in range(self.cols):
                s = printer._print(self[i, j])
                res[-1].append(s)
                maxlen[j] = max(len(s), maxlen[j])
        # Patch strings together
        align = {
            'left': 'ljust',
            'right': 'rjust',
            'center': 'center',
            '<': 'ljust',
            '>': 'rjust',
            '^': 'center',
        }[align]
        for i, row in enumerate(res):
            for j, elem in enumerate(row):
                row[j] = getattr(elem, align)(maxlen[j])
            res[i] = rowstart + colsep.join(row) + rowend
        return rowsep.join(res)

    def upper_triangular_solve(self, rhs, dotprodsimp=None):
        """Solves ``Ax = B``, where A is an upper triangular matrix.

        See Also
        ========

        lower_triangular_solve
        gauss_jordan_solve
        cholesky_solve
        diagonal_solve
        LDLsolve
        LUsolve
        QRsolve
        pinv_solve
        """
        if not self.is_square:
            raise NonSquareMatrixError("Matrix must be square.")
        if rhs.rows != self.rows:
            raise TypeError("Matrix size mismatch.")
        if not self.is_upper:
            raise TypeError("Matrix is not upper triangular.")
        return self._upper_triangular_solve(rhs, dotprodsimp=dotprodsimp)

    def vech(self, diagonal=True, check_symmetry=True):
        """Return the unique elements of a symmetric Matrix as a one column matrix
        by stacking the elements in the lower triangle.

        Arguments:
        diagonal -- include the diagonal cells of ``self`` or not
        check_symmetry -- checks symmetry of ``self`` but not completely reliably

        Examples
        ========

        >>> from sympy import Matrix
        >>> m = Matrix([[1, 2], [2, 3]])
        >>> m
        Matrix([
        [1, 2],
        [2, 3]])
        >>> m.vech()
        Matrix([
        [1],
        [2],
        [3]])
        >>> m.vech(diagonal=False)
        Matrix([[2]])

        See Also
        ========

        vec
        """
        from sympy.matrices import zeros

        c = self.cols
        if c != self.rows:
            raise ShapeError("Matrix must be square")
        if check_symmetry:
            self.simplify()
            if self != self.transpose():
                raise ValueError(
                    "Matrix appears to be asymmetric; consider check_symmetry=False")
        count = 0
        if diagonal:
            v = zeros(c * (c + 1) // 2, 1)
            for j in range(c):
                for i in range(j, c):
                    v[count] = self[i, j]
                    count += 1
        else:
            v = zeros(c * (c - 1) // 2, 1)
            for j in range(c):
                for i in range(j + 1, c):
                    v[count] = self[i, j]
                    count += 1
        return v

@deprecated(
    issue=15109,
    useinstead="from sympy.matrices.common import classof",
    deprecated_since_version="1.3")
def classof(A, B):
    from sympy.matrices.common import classof as classof_
    return classof_(A, B)

@deprecated(
    issue=15109,
    deprecated_since_version="1.3",
    useinstead="from sympy.matrices.common import a2idx")
def a2idx(j, n=None):
    from sympy.matrices.common import a2idx as a2idx_
    return a2idx_(j, n)
