// This file is part of Eigen, a lightweight C++ template library
// for linear algebra. Eigen itself is part of the KDE project.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

#ifndef EIGEN_COMPRESSED_STORAGE_H
#define EIGEN_COMPRESSED_STORAGE_H

/** Stores a sparse set of values as a list of values and a list of indices.
  *
  * Maintains two parallel arrays: \c m_values[i] is the coefficient stored at
  * inner index \c m_indices[i]. Both arrays share the same logical length
  * (\c m_size); \c m_allocatedSize tracks the real capacity so that append()
  * can grow with amortized reallocation. The indices are expected to be kept
  * sorted by the caller (the search/insert helpers rely on it).
  */
template<typename Scalar>
class CompressedStorage
{
    typedef typename NumTraits<Scalar>::Real RealScalar;
  public:
    /** Creates an empty storage; no memory is allocated. */
    CompressedStorage()
      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
    {}

    /** Creates a storage with room for \a size coefficients (values uninitialized). */
    CompressedStorage(std::size_t size)
      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
    {
      resize(size);
    }

    /** Deep-copy constructor (delegates to operator=). */
    CompressedStorage(const CompressedStorage& other)
      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
    {
      *this = other;
    }

    /** Deep-copy assignment. */
    CompressedStorage& operator=(const CompressedStorage& other)
    {
      // Self-assignment guard: without it we would copy over identical,
      // overlapping storage, which memcpy-style copies do not permit.
      if (this != &other)
      {
        resize(other.size());
        // std::copy instead of std::memcpy: correct for non-POD scalar types
        // (e.g. std::complex<>), and compilers lower it to memcpy for PODs.
        std::copy(other.m_values, other.m_values + m_size, m_values);
        std::copy(other.m_indices, other.m_indices + m_size, m_indices);
      }
      return *this;
    }

    /** Swaps the content of \c *this with \a other in O(1). */
    void swap(CompressedStorage& other)
    {
      std::swap(m_values, other.m_values);
      std::swap(m_indices, other.m_indices);
      std::swap(m_size, other.m_size);
      std::swap(m_allocatedSize, other.m_allocatedSize);
    }

    ~CompressedStorage()
    {
      delete[] m_values;
      delete[] m_indices;
    }

    /** Ensures capacity for \a size additional coefficients beyond the current size. */
    void reserve(std::size_t size)
    {
      std::size_t newAllocatedSize = m_size + size;
      if (newAllocatedSize > m_allocatedSize)
        reallocate(newAllocatedSize);
    }

    /** Shrinks the allocated buffers to exactly fit the current size. */
    void squeeze()
    {
      if (m_allocatedSize>m_size)
        reallocate(m_size);
    }

    /** Resizes to \a size coefficients, growing the capacity by an extra
      * fraction \a reserveSizeFactor of \a size to amortize repeated growth. */
    void resize(std::size_t size, float reserveSizeFactor = 0)
    {
      if (m_allocatedSize<size)
        reallocate(size + std::size_t(reserveSizeFactor*size));
      m_size = size;
    }

    /** Appends the pair (\a v, \a i) at the end; the caller is responsible
      * for keeping the indices sorted. */
    void append(const Scalar& v, int i)
    {
      // std::size_t (not int) to match m_size and avoid narrowing on large sizes.
      std::size_t id = m_size;
      resize(m_size+1, 1);
      m_values[id] = v;
      m_indices[id] = i;
    }

    /** \returns the number of stored coefficients. */
    inline std::size_t size() const { return m_size; }
    /** \returns the current capacity of the buffers. */
    inline std::size_t allocatedSize() const { return m_allocatedSize; }
    /** Logically empties the storage; the buffers are kept for reuse. */
    inline void clear() { m_size = 0; }

    inline Scalar& value(std::size_t i) { return m_values[i]; }
    inline const Scalar& value(std::size_t i) const { return m_values[i]; }

    inline int& index(std::size_t i) { return m_indices[i]; }
    inline const int& index(std::size_t i) const { return m_indices[i]; }

    /** Wraps external buffers \a indices / \a values of length \a size.
      * \warning The returned object takes OWNERSHIP of the buffers: its
      * destructor will \c delete[] them, and the deep-copying copy
      * constructor/assignment can trigger that destruction on temporaries.
      * The buffers therefore must have been allocated with \c new[] and must
      * not be freed by the caller. */
    static CompressedStorage Map(int* indices, Scalar* values, std::size_t size)
    {
      CompressedStorage res;
      res.m_indices = indices;
      res.m_values = values;
      res.m_allocatedSize = res.m_size = size;
      return res;
    }

    /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
    inline int searchLowerIndex(int key) const
    {
      return searchLowerIndex(0, m_size, key);
    }

    /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key
      * (classic binary lower-bound search on the sorted index array). */
    inline int searchLowerIndex(std::size_t start, std::size_t end, int key) const
    {
      while(end>start)
      {
        std::size_t mid = (end+start)>>1;
        if (m_indices[mid]<key)
          start = mid+1;
        else
          end = mid;
      }
      // the public return type is int; the cast makes the narrowing explicit
      return static_cast<int>(start);
    }

    /** \returns the stored value at index \a key
      * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
    inline Scalar at(int key, Scalar defaultValue = Scalar(0)) const
    {
      if (m_size==0)
        return defaultValue;
      else if (key==m_indices[m_size-1])
        return m_values[m_size-1];
      // ^^  optimization: let's first check if it is the last coefficient
      // (very common in high level algorithms)
      const std::size_t id = searchLowerIndex(0,m_size-1,key);
      return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
    }

    /** Like at(), but the search is performed in the range [start,end) */
    inline Scalar atInRange(std::size_t start, std::size_t end, int key, Scalar defaultValue = Scalar(0)) const
    {
      if (start==end)
        return defaultValue;  // was Scalar(0): must honor the caller's default, as at() does
      else if (end>start && key==m_indices[end-1])
        return m_values[end-1];
      // ^^  optimization: let's first check if it is the last coefficient
      // (very common in high level algorithms)
      const std::size_t id = searchLowerIndex(start,end-1,key);
      return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
    }

    /** \returns a reference to the value at index \a key
      * If the value does not exist, then the value \a defaultValue is inserted
      * such that the keys are sorted. */
    inline Scalar& atWithInsertion(int key, Scalar defaultValue = Scalar(0))
    {
      std::size_t id = searchLowerIndex(0,m_size,key);
      if (id>=m_size || m_indices[id]!=key)
      {
        // grow by one and shift the tail one slot to the right to make room at id
        resize(m_size+1,1);
        for (std::size_t j=m_size-1; j>id; --j)
        {
          m_indices[j] = m_indices[j-1];
          m_values[j] = m_values[j-1];
        }
        m_indices[id] = key;
        m_values[id] = defaultValue;
      }
      return m_values[id];
    }

    /** Removes every coefficient that is "much smaller than" \a reference
      * (as decided by ei_isMuchSmallerThan with tolerance \a epsilon),
      * compacting the surviving coefficients in place. */
    void prune(Scalar reference, RealScalar epsilon = precision<RealScalar>())
    {
      std::size_t k = 0;
      std::size_t n = size();
      for (std::size_t i=0; i<n; ++i)
      {
        if (!ei_isMuchSmallerThan(value(i), reference, epsilon))
        {
          // keep coefficient i: move it down to the next free slot k
          value(k) = value(i);
          index(k) = index(i);
          ++k;
        }
      }
      resize(k,0);
    }

  protected:

    /** \internal Reallocates both buffers to capacity \a size, preserving the
      * first min(size, m_size) coefficients. */
    inline void reallocate(std::size_t size)
    {
      Scalar* newValues = new Scalar[size];
      int* newIndices;
      try
      {
        newIndices = new int[size];
      }
      catch (...)
      {
        // don't leak the values buffer if the second allocation throws
        delete[] newValues;
        throw;
      }
      std::size_t copySize = std::min(size, m_size);
      // element-wise copy: safe for non-POD scalar types, unlike memcpy
      std::copy(m_values,  m_values  + copySize, newValues);
      std::copy(m_indices, m_indices + copySize, newIndices);
      // delete old stuff
      delete[] m_values;
      delete[] m_indices;
      m_values = newValues;
      m_indices = newIndices;
      m_allocatedSize = size;
    }

  protected:
    Scalar* m_values;         // coefficient values, parallel to m_indices
    int* m_indices;           // sorted inner indices
    std::size_t m_size;       // number of stored coefficients
    std::size_t m_allocatedSize; // capacity of both buffers (>= m_size)

};

#endif // EIGEN_COMPRESSED_STORAGE_H
