#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html

"""This module contains implementations of basic interfaces used across the whole gensim package.
These interfaces are usable for building corpus, transformation, and similarity classes.

All interfaces are realized as abstract base classes (i.e. some optional functionality is provided in the interface
itself, so that the interfaces should be inherited).

"""

from __future__ import with_statement

import logging
import warnings

from gensim import utils, matutils
from six.moves import xrange


logger = logging.getLogger(__name__)


class CorpusABC(utils.SaveLoad):
    """Interface for corpus classes from :mod:`gensim.corpora`.

    Corpus is simply an iterable object, where each iteration step yields one document:

    >>> from gensim.corpora import MmCorpus  # this is inheritor of CorpusABC class
    >>> from gensim.test.utils import datapath
    >>>
    >>> corpus = MmCorpus(datapath("testcorpus.mm"))
    >>> for doc in corpus:
    ...     pass # do something with the doc...

    A document is represented in bag-of-words (BoW) format, i.e. list of (attr_id, attr_value),
    like ``[(1, 0.2), (4, 0.6), ...]``.

    >>> from gensim.corpora import MmCorpus  # this is inheritor of CorpusABC class
    >>> from gensim.test.utils import datapath
    >>>
    >>> corpus = MmCorpus(datapath("testcorpus.mm"))
    >>> doc = next(iter(corpus))
    >>> print(doc)
    [(0, 1.0), (1, 1.0), (2, 1.0)]

    Remember, that save/load methods save only corpus class (not corpus as data itself),
    for save/load functionality, please use this pattern:

    >>> from gensim.corpora import MmCorpus  # this is inheritor of CorpusABC class
    >>> from gensim.test.utils import datapath, get_tmpfile
    >>>
    >>> corpus = MmCorpus(datapath("testcorpus.mm"))
    >>> tmp_path = get_tmpfile("temp_corpus.mm")
    >>>
    >>> MmCorpus.serialize(tmp_path, corpus)  #  serialize corpus to disk in MmCorpus format
    >>> # MmCorpus.save_corpus(tmp_path, corpus)  # this variant also possible, but if serialize available - call it.
    >>> loaded_corpus = MmCorpus(tmp_path)  # load corpus through constructor
    >>> for (doc_1, doc_2) in zip(corpus, loaded_corpus):
    ...     assert doc_1 == doc_2  # check that the corpora are exactly the same


    See Also
    --------
    :mod:`gensim.corpora`
        Corpora in different formats

    """

    def __iter__(self):
        """Iterate over corpus, **should be overridden in inheritor class**.

        Raises
        ------
        NotImplementedError
            Since it's abstract class this iterator protocol should be overwritten in the inherited class.

        """
        raise NotImplementedError('cannot instantiate abstract base class')

    def save(self, *args, **kwargs):
        """Save the corpus object's in-memory state (not the corpus documents themselves).

        Warnings
        --------
        This saves only the "state" of the corpus class (not the corpus data),
        for saving data please use :meth:`~gensim.interfaces.CorpusABC.save_corpus` instead.

        Parameters
        ----------
        *args
            Variable length argument list.
        **kwargs
            Arbitrary keyword arguments.

        """
        # warn loudly: users routinely expect save() to persist the documents,
        # but it only pickles the (tiny) corpus wrapper object
        warnings.warn(
            "corpus.save() stores only the (tiny) iteration object; "
            "to serialize the actual corpus content, use e.g. MmCorpus.serialize(corpus)"
        )
        super(CorpusABC, self).save(*args, **kwargs)

    def __len__(self):
        """Get size of the corpus (number of documents), **should be overridden in inheritor class**.

        Raises
        ------
        NotImplementedError
            Since it's abstract class this method should be reimplemented later.

        """
        raise NotImplementedError("must override __len__() before calling len(corpus)")

    @staticmethod
    def save_corpus(fname, corpus, id2word=None, metadata=False):
        """Save given `corpus` to disk, **should be overridden in inheritor class**.

        Some formats support saving the dictionary (`feature_id -> word` mapping),
        which can be provided by the optional `id2word` parameter.

        Notes
        -----
        Some corpora also support an index of where each document begins, so that the documents on disk
        can be accessed in O(1) time (see the :class:`gensim.corpora.indexedcorpus.IndexedCorpus` base class).
        In this case, :meth:`~gensim.interfaces.CorpusABC.save_corpus` is automatically called internally by
        :func:`serialize`, which does :meth:`~gensim.interfaces.CorpusABC.save_corpus` plus saves the index
        at the same time.

        Calling :func:`serialize` is preferred to calling :meth:`~gensim.interfaces.CorpusABC.save_corpus`.

        Parameters
        ----------
        fname : str
            Path to output file.
        corpus : iterable of list of (int, number)
            Corpus in BoW format.
        id2word : :class:`~gensim.corpora.Dictionary`, optional
            Dictionary of corpus.
        metadata : bool, optional
            If True, will write some meta-information to `fname` too.

        """
        raise NotImplementedError('cannot instantiate abstract base class')


class TransformedCorpus(CorpusABC):
    """Interface for a corpus with an on-the-fly transformation applied to each document."""

    def __init__(self, obj, corpus, chunksize=None, **kwargs):
        """

        Parameters
        ----------
        obj : object
            Object with a transformation applied through ``__getitem__``.
        corpus : iterable of list of (int, number)
            Corpus in BoW format.
        chunksize : int, optional
            If set, documents will be processed in groups of this size (usually more efficient).
        kwargs
            Extra attributes forwarded onto `obj` via :func:`setattr`.

        """
        self.obj = obj
        self.corpus = corpus
        self.chunksize = chunksize
        # forward any extra parameters (e.g. per_word_topics) onto the wrapped model object
        for attr_name, attr_value in kwargs.items():
            setattr(self.obj, attr_name, attr_value)
        self.metadata = False

    def __len__(self):
        """Get the number of documents in the wrapped corpus."""
        return len(self.corpus)

    def __iter__(self):
        """Iterate over the corpus, yielding each document transformed by `obj`.

        If `chunksize` is set, works in "batch-manner" (more efficient).

        Yields
        ------
        list of (int, number)
            Document in BoW format

        """
        if not self.chunksize:
            # simple path: transform one document at a time
            for document in self.corpus:
                yield self.obj[document]
        else:
            # batched path: hand whole chunks to the transformation at once
            for chunk in utils.grouper(self.corpus, self.chunksize):
                for transformed in self.obj.__getitem__(chunk, chunksize=None):
                    yield transformed

    def __getitem__(self, docno):
        """Transform the document at position `docno` within the corpus.

        Parameters
        ----------
        docno : int
            Index of document in corpus.

        Returns
        -------
        list of (int, number)
            Document in BoW format

        Raises
        ------
        RuntimeError
            If the underlying corpus doesn't support random access (no :meth:`__getitem__`).

        """
        if not hasattr(self.corpus, '__getitem__'):
            raise RuntimeError('Type {} does not support slicing.'.format(type(self.corpus)))
        return self.obj[self.corpus[docno]]


class TransformationABC(utils.SaveLoad):
    """Transformation interface.

    A 'transformation' is any object that accepts a document in BoW format via `__getitem__`
    (the `[]` notation) and returns another sparse document in its stead:

    >>> from gensim.models import LsiModel
    >>> from gensim.test.utils import common_dictionary, common_corpus
    >>>
    >>> model = LsiModel(common_corpus, id2word=common_dictionary)
    >>> bow_vector = model[common_corpus[0]]  # model applied through __getitem__ on document from corpus.
    >>> bow_corpus = model[common_corpus]  # also, we can apply model on full corpus


    """

    def __getitem__(self, vec):
        """Transform a single vector, or a whole corpus, **should be overridden in inheritor class**.

        Transforms vector from one vector space into another **or** whole corpus into another.

        Parameters
        ----------
        vec : object
            Vector (or corpus of vectors) to transform.

        Raises
        ------
        NotImplementedError
            This is an abstract class; this method must be reimplemented in a subclass.

        """
        raise NotImplementedError('cannot instantiate abstract base class')

    def _apply(self, corpus, chunksize=None, **kwargs):
        """Apply the transformation to a whole corpus and get the result as another corpus.

        Parameters
        ----------
        corpus : iterable of list of (int, number)
            Corpus in BoW format.
        chunksize : int, optional
            If set, documents will be processed in groups of this size (usually more efficient).
        kwargs
            Arbitrary keyword arguments, forwarded to :class:`~gensim.interfaces.TransformedCorpus`.

        Returns
        -------
        :class:`~gensim.interfaces.TransformedCorpus`
            Transformed corpus.

        """
        return TransformedCorpus(self, corpus, chunksize, **kwargs)


class SimilarityABC(utils.SaveLoad):
    """Interface for similarity search over a corpus.

    In all instances, there is a corpus against which we want to perform the similarity search.
    For each similarity search, the input is a document and the output are its similarities
    to individual corpus documents.


    Examples
    --------
    >>> from gensim.similarities import MatrixSimilarity
    >>> from gensim.test.utils import common_dictionary, common_corpus
    >>>
    >>> index = MatrixSimilarity(common_corpus)
    >>> similarities = index.get_similarities(common_corpus[1])  # get similarities between query and corpus

    Notes
    -----
    There is also a convenience wrapper, where iterating over `self` yields similarities of each document in the corpus
    against the whole corpus (i.e. the query is each corpus document in turn).

    See Also
    --------
    :mod:`gensim.similarities`
        Provides different types of indexes for search.

    """

    def __init__(self, corpus):
        """Initialization of object, **should be overridden in inheritor class**.

        Parameters
        ----------
        corpus : iterable of list of (int, number)
            Corpus in BoW format.

        Raises
        ------
        NotImplementedError
            Since it's abstract class this method should be reimplemented later.

        """
        raise NotImplementedError("cannot instantiate Abstract Base Class")

    def get_similarities(self, doc):
        """Get similarity measures of documents of corpus to given `doc`, **should be overridden in inheritor class**.

        Parameters
        ----------
        doc : list of (int, number)
            Document in BoW format.

        Raises
        ------
        NotImplementedError
            Since it's abstract class this method should be reimplemented later.

        """
        raise NotImplementedError("cannot instantiate Abstract Base Class")

    def __getitem__(self, query):
        """Get access to similarities of document/corpus `query` to all documents in the corpus.

        Using :meth:`~gensim.interfaces.SimilarityABC.get_similarities`


        Notes
        -----
        Passing a corpus to `query` (instead of a single document) can be more efficient,
        because it will be processed in batches.

        Parameters
        ----------
        query : {list of (int, int), iterable of list of (int, int)}
            Document or corpus in BoW format.

        Returns
        -------
        {`scipy.sparse.csr.csr_matrix`, list of (int, float)}
            Similarities given document or corpus and objects corpus, depends on `query`.

        """
        is_corpus, query = utils.is_corpus(query)
        if self.normalize:
            # self.normalize only works if the input is a plain gensim vector/corpus (as
            # advertised in the doc). in fact, input can be a numpy or scipy.sparse matrix
            # as well, but in that case assume tricks are happening and don't normalize
            # anything (self.normalize has no effect).
            if not matutils.ismatrix(query):
                if is_corpus:
                    query = [matutils.unitvec(v) for v in query]
                else:
                    query = matutils.unitvec(query)
        result = self.get_similarities(query)

        if self.num_best is None:
            return result

        # if maintain_sparsity is True, result is scipy sparse. Sort, clip the
        # topn and return as a scipy sparse matrix.
        if getattr(self, 'maintain_sparsity', False):
            return matutils.scipy2scipy_clipped(result, self.num_best)

        # if the input query was a corpus (=more documents), compute the top-n
        # most similar for each document in turn
        if matutils.ismatrix(result):
            return [matutils.full2sparse_clipped(v, self.num_best) for v in result]
        else:
            # otherwise, return top-n of the single input document
            return matutils.full2sparse_clipped(result, self.num_best)

    def __iter__(self):
        """Iterate over all documents, computes similarity against all other documents in the index.

        Yields
        ------
        {`scipy.sparse.csr.csr_matrix`, list of (int, float)}
            Similarity of current document and all documents of corpus.

        """
        # turn off query normalization (vectors in the index are assumed to be already normalized)
        norm = self.normalize
        self.normalize = False

        # Try to compute similarities in bigger chunks of documents (not
        # one query = a single document after another). The point is, a
        # bigger query of N documents is faster than N small queries of one
        # document.
        #
        # After computing similarities of the bigger query in `self[chunk]`,
        # yield the resulting similarities one after another, so that it looks
        # exactly the same as if they had been computed with many small queries.
        try:
            try:
                chunking = self.chunksize > 1
            except AttributeError:
                # chunking not supported; fall back to the (slower) mode of 1 query=1 document
                chunking = False
            if chunking:
                # assumes `self.index` holds the index as a 2-d numpy array.
                # this is true for MatrixSimilarity and SparseMatrixSimilarity, but
                # may not be true for other (future) classes..?
                for chunk_start in xrange(0, self.index.shape[0], self.chunksize):
                    # scipy.sparse doesn't allow slicing beyond real size of the matrix
                    # (unlike numpy). so, clip the end of the chunk explicitly to make
                    # scipy.sparse happy
                    chunk_end = min(self.index.shape[0], chunk_start + self.chunksize)
                    chunk = self.index[chunk_start: chunk_end]
                    for sim in self[chunk]:
                        yield sim
            else:
                for doc in self.index:
                    yield self[doc]
        finally:
            # restore the old normalization value even if the consumer abandons
            # the generator early or an exception is raised mid-iteration;
            # otherwise `self.normalize` would be left permanently disabled
            self.normalize = norm
