/*=====================================================================*
 *                   Copyright (C) 2011 Paul Mineiro                   *
 * All rights reserved.                                                *
 *                                                                     *
 * Redistribution and use in source and binary forms, with             *
 * or without modification, are permitted provided that the            *
 * following conditions are met:                                       *
 *                                                                     *
 *     * Redistributions of source code must retain the                *
 *     above copyright notice, this list of conditions and             *
 *     the following disclaimer.                                       *
 *                                                                     *
 *     * Redistributions in binary form must reproduce the             *
 *     above copyright notice, this list of conditions and             *
 *     the following disclaimer in the documentation and/or            *
 *     other materials provided with the distribution.                 *
 *                                                                     *
 *     * Neither the name of Paul Mineiro nor the names                *
 *     of other contributors may be used to endorse or promote         *
 *     products derived from this software without specific            *
 *     prior written permission.                                       *
 *                                                                     *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND              *
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,         *
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES               *
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE             *
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER               *
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,                 *
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES            *
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE           *
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR                *
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF          *
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT           *
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY              *
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE             *
 * POSSIBILITY OF SUCH DAMAGE.                                         *
 *                                                                     *
 * Contact: Paul Mineiro <paul@mineiro.com>                            *
 *=====================================================================*/

#ifndef __REGRESSOR_HH__
#define __REGRESSOR_HH__

#include <boost/optional.hpp>
#include <boost/shared_ptr.hpp>
#include <cerrno>
#include <cmath>
#include <cstring>
#include <ostream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

#include <ctype.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include "example.hh"
#include "parse.hh"
#include "loss.hh"

#include "frsqrt.hh"

namespace flassol
{
  class Regressor
    {
      public:
        // One slot of the mmap'd weight table (part of the on-disk
        // format; do not change field sizes or order).
        struct Weight
          {
            float value;        // current model weight for this slot
            float sum_sq_grad;  // accumulated squared gradient, used via
                                // rsqrtf () for the adaptive step size
          };

        // Per-namespace n-gram expansion parameters.  Stored inside the
        // mmap'd Header, so the bitfield layout is part of the on-disk
        // format -- do not change field widths or order.
        struct NGramSpec
          {
            unsigned int n : 4;  // maximum n-gram length (see NGramConstIterator)
            unsigned int k : 4;  // maximum skip/gap between n-gram elements

            NGramSpec () : n (0), k (0) { }
            NGramSpec (uint8_t _n, uint8_t _k) : n (_n), k (_k) { }
          };

        // Specification of a dot-product interaction between two
        // namespaces.  Stored inside the mmap'd Header, so layout is
        // part of the on-disk format -- do not reorder or resize.
        struct DotProductSpec
          {
            uint8_t  a;  // first namespace of the pair
            uint8_t  b;  // second namespace of the pair
            uint16_t k;  // presumably the dot-product dimensionality --
                         // confirm against the dotproduct evaluation
                         // code (not visible in this file region)

            DotProductSpec () : a (0), b (0), k (0) { }
            DotProductSpec (uint8_t  _a,
                            uint8_t  _b,
                            uint16_t _k)
              : a (_a),
                b (_b),
                k (_k)
              {
              }
          };

        // On-disk file header (format version 3).  Instances are both
        // constructed fresh and aliased directly onto the mmap'd file
        // image, so the field order, types, and sizes below define the
        // file format -- do not reorder or resize them.
        struct Header
          {
            uint64_t                            version;
            uint64_t                            num_weights;
            uint64_t                            num_quadratic;
            std::pair<uint8_t, uint8_t>         quadratic[256];
            uint64_t                            num_ngram;
            std::pair<uint8_t, NGramSpec>       ngram[256];
            uint64_t                            num_dotproduct;
            DotProductSpec                      dotproduct[256];
            uint8_t                             hash_option;
            uint8_t                             constant_feature;
            char                                loss[62];
            uint64_t                            num_classes;
            uint64_t                            unused[16];

            Header ()
              {
                // Zero the entire object first -- including padding
                // bytes -- so the memcmp-based operator== below is
                // reliable and freshly written headers are byte
                // reproducible.  The previous member-initializer-list
                // form left padding uninitialized.
                memset (this, 0, sizeof (Header));

                version = 3;
                num_weights = 1 << 16;
                hash_option = StandardParse::STRING;
                constant_feature = 1;
                num_classes = 2;
              }

            // Bytewise equality; sound because the constructor zeroes
            // the whole object (padding included).
            bool
            operator== (const Header& o) const
              {
                return memcmp (this, &o, sizeof (Header)) == 0;
              }

            // Total file size: this header followed by the weight table.
            uint64_t
            file_size () const
              {
                return sizeof (Header) + num_weights * sizeof (Weight);
              }

            // Print a namespace byte: literally when printable,
            // otherwise as a shell-style octal escape $'\NNN'.
            static void
            output_uint8_t (std::ostream& o,
                            uint8_t       x)
              {
                if (isgraph (x))
                  {
                    o << x;
                  }
                else
                  {
                    o << "$'\\" << std::oct 
                      << static_cast<unsigned int> (x)
                      << "'" << std::dec;
                  }
              }

            // Human-readable dump of the configuration portion of the
            // header (weights themselves are not printed).
            friend std::ostream&
            operator<< (std::ostream& o,
                        const Header& h)
              {
                o << "num_weights = " << h.num_weights << std::endl;
                o << "quadratic = ";
                for (uint64_t n = 0; n < h.num_quadratic; ++n)
                  {
                    output_uint8_t (o, h.quadratic[n].first);
                    output_uint8_t (o, h.quadratic[n].second);
                    o << " ";
                  }
                o << std::endl;

                o << "ngram = ";
                for (uint64_t n = 0; n < h.num_ngram; ++n)
                  {
                    output_uint8_t (o, h.ngram[n].first);
                    o << h.ngram[n].second.n 
                      << "," << h.ngram[n].second.k
                      << " ";
                  }
                o << std::endl;

                o << "dotproduct = ";
                for (uint64_t n = 0; n < h.num_dotproduct; ++n)
                  {
                    output_uint8_t (o, h.dotproduct[n].a);
                    output_uint8_t (o, h.dotproduct[n].b);
                    o << ":" << h.dotproduct[n].k << " ";
                  }
                o << std::endl;

                o << "loss = " << h.loss << std::endl;

                if (h.hash_option == StandardParse::STRING)
                  {
                    o << "hash_option = string" << std::endl;
                  }
                else
                  {
                    o << "hash_option = all" << std::endl;
                  }

                o << "constant feature = " 
                  << ((h.constant_feature) ? "true" : "false")
                  << std::endl;

                o << "num_classes = " << h.num_classes << std::endl;

                return o;
              }
          };

        class Open
          {
            private:
              std::string pathname;
              Header header;
              bool is_read_only;

            public:
              friend class Regressor;

              Open (const std::string& _pathname) 
                : pathname (_pathname),
                  is_read_only (true)
                {
                }

              Open& 
              num_weights (uint64_t _num_weights)
                {
                  header.num_weights = _num_weights;
                  return *this;
                }

	      Open&
	      loss (const std::string& l)
		{
		  if (1 + l.size () > sizeof (header.loss))
		    {
		      throw std::runtime_error ("loss function id too long");
		    }

		  memcpy (header.loss, l.c_str (), l.size ());
		  header.loss[l.size ()] = 0;

		  return *this;
		}

              Open&
              hash_option (const StandardParse::hash_option_value& ho)
                {
                  header.hash_option = ho;
                  return *this;
                }

              Open&
              add_quadratic (uint8_t a,
                             uint8_t b)
                {
                  if (header.num_quadratic > 255)
                    {
                      throw std::runtime_error ("num quadratic overflow");
                    }

                  header.quadratic[header.num_quadratic] = 
                    std::make_pair (a, b);
                  ++header.num_quadratic;

                  return *this;
                }

              Open&
              add_quadratic (const std::vector<std::pair<uint8_t, uint8_t> >& q)
                {
                  for (std::vector<std::pair<uint8_t, uint8_t> >::const_iterator x = q.begin ();
                       x != q.end ();
                       ++x)
                    {
                      add_quadratic (x->first, x->second);
                    }

                  return *this;
                }

              Open&
              add_dotproduct (DotProductSpec spec)
                {
                  if (header.num_dotproduct > 255)
                    {
                      throw std::runtime_error ("num dotproduct overflow");
                    }

                  header.dotproduct[header.num_dotproduct] = spec;
                  ++header.num_dotproduct;

                  return *this;
                }

              Open&
              add_dotproduct (const std::vector<DotProductSpec>& dp)
                {
                  for (std::vector<DotProductSpec>::const_iterator x = dp.begin ();
                       x != dp.end ();
                       ++x)
                    {
                      add_dotproduct (*x);
                    }

                  return *this;
                }

              Open&
              add_ngram (uint8_t   a,
                         NGramSpec spec)
                {
                  if (header.num_ngram > 255)
                    {
                      throw std::runtime_error ("num ngram overflow");
                    }

                  header.ngram[header.num_ngram] = std::make_pair (a, spec);
                  ++header.num_ngram;

                  return *this;
                }

              Open&
              add_ngram (const std::vector<std::pair<uint8_t, NGramSpec> >& n)
                {
                  for (std::vector<std::pair<uint8_t, NGramSpec> >::const_iterator x = n.begin ();
                       x != n.end ();
                       ++x)
                    {
                      add_ngram (x->first, x->second);
                    }

                  return *this;
                }

              Open& 
              read_only (bool value)
                {
                  is_read_only = value;
                  return *this;
                }

              Open& 
              constant_feature (bool value)
                {
                  header.constant_feature = value;
                  return *this;
                }

              Open& 
              num_classes (uint64_t value)
                {
                  header.num_classes = value;
                  return *this;
                }
          };

      private:
        int fd;           // descriptor of the open model file
        uint8_t* base;    // start of the mmap'd file image
        Header* header;   // aliases the front of 'base'
        Weight* weights;  // weight table, immediately after the header

        // Validate and mmap the model file referenced by 'fd', setting
        // 'base', 'header' and 'weights'.  Throws std::runtime_error on
        // I/O failure or format-version mismatch.
        void 
        initialize_db_from_fd (bool read_only)
          {
            Header tmp_hdr;

            if (lseek (fd, 0, SEEK_SET) == (off_t) -1)
              {
                throw std::runtime_error (strerror (errno));
              }

            // read () may legitimately return fewer bytes than
            // requested, or fail with EINTR; loop until the full
            // header has been read.  (The previous code treated a
            // short read as an errno failure, reporting a stale errno.)
            uint8_t* dst = reinterpret_cast<uint8_t*> (&tmp_hdr);
            size_t remaining = sizeof (tmp_hdr);

            while (remaining > 0)
              {
                ssize_t got = read (fd, dst, remaining);

                if (got < 0)
                  {
                    if (errno == EINTR)
                      {
                        continue;
                      }

                    throw std::runtime_error (strerror (errno));
                  }

                if (got == 0)
                  {
                    throw std::runtime_error ("unexpected eof reading header");
                  }

                dst += got;
                remaining -= got;
              }

            if (tmp_hdr.version != 3)
              {
                throw std::runtime_error ("document db version mismatch");
              }

            // NB: mmap takes an explicit offset (0) and ignores the
            // file position, so no seek back is needed here.
            void* addr = mmap (NULL,
                               tmp_hdr.file_size (),
                               PROT_READ | (read_only ? 0 : PROT_WRITE),
                               MAP_SHARED,
                               fd,
                               0);

            if (addr == MAP_FAILED)
              {
                throw std::runtime_error (strerror (errno));
              }

            base = reinterpret_cast<uint8_t*> (addr);
            header = reinterpret_cast<Header*> (base);
            weights = reinterpret_cast<Weight*> (base + sizeof (Header));

            // Weight probes are hash-scattered; advisory hint only, so
            // the return value is deliberately ignored.
            posix_madvise (base, header->file_size (), POSIX_MADV_RANDOM);
          }

        // Look up the n-gram spec configured for namespace 'ns' by a
        // linear scan of the (at most 256 entry) header table; falls
        // back to plain unigrams (n = 1, k = 0) when absent.
        NGramSpec 
        get_ngram_spec (uint8_t ns) const
          {
            const uint64_t count = header->num_ngram;

            for (uint64_t idx = 0; idx < count; ++idx)
              {
                const std::pair<uint8_t, NGramSpec>& entry = header->ngram[idx];

                if (entry.first == ns)
                  {
                    return entry.second;
                  }
              }

            return NGramSpec (1, 0);
          }

        bool
        is_dotproducted (uint8_t ns) const
          {
            for (unsigned int n = 0; n < header->num_dotproduct; ++n)
              {
                if (header->dotproduct[n].a == ns ||
                    header->dotproduct[n].b == ns)
                  {
                    return true;
                  }
              }

            return false;
          }

        // Forward iterator over every (skip-)n-gram of one namespace's
        // feature list.  State is (cur_f, cur_n, cur_k): cur_f is the
        // index of the LAST element of the current n-gram, cur_n its
        // length, cur_k the gap between consecutive elements.
        // Enumeration order: for each length n = 1..spec.n, each gap
        // k = 0..spec.k (gaps only apply when n > 1), and within each
        // (n, k) all feasible end positions.  The end state is
        // (f.size (), spec.n, spec.k).
        class NGramConstIterator
          {
            private:
              const FeatureSet& f;
              NGramSpec         spec;
              unsigned int      cur_f;
              unsigned int      cur_n;
              unsigned int      cur_k;
              Feature           tmp;  // scratch combined feature for operator->

            public:
              NGramConstIterator (const FeatureSet& _f,
                                  const NGramSpec&  _spec) : f (_f),
                                                             spec (_spec),
                                                             cur_f (0),
                                                             cur_n (1),
                                                             cur_k (0),
                                                             tmp (0, 0)
                {
                  // Nothing to enumerate for an empty feature set:
                  // start directly in the end state.
                  if (f.size () == 0)
                    {
                      cur_f = f.size ();
                      cur_n = spec.n;
                      cur_k = spec.k;
                    }
                }

              // Build the past-the-end sentinel for (f, spec).
              static NGramConstIterator
              end (const FeatureSet& f,
                   const NGramSpec&  spec)
                {
                  NGramConstIterator rv (f, spec);

                  rv.cur_f = f.size ();
                  rv.cur_n = spec.n;
                  rv.cur_k = spec.k;

                  return rv;
                }

              bool
              operator!= (const NGramConstIterator& o)
                {
                  return cur_n != o.cur_n ||
                         cur_k != o.cur_k ||
                         cur_f != o.cur_f;
                }

              NGramConstIterator&
              operator++ ()
                {
                  ++cur_f;

                  if (cur_f >= f.size ())
                    {
                      // End positions for (cur_n, cur_k) exhausted:
                      // advance the gap first, then the length, then
                      // park in the end state.
                      if (cur_n > 1 &&
                          cur_k < spec.k &&
                          (cur_n - 1) * (2 + cur_k) < f.size ())
                        {
                          // (cur_n - 1) * (2 + cur_k) is the smallest
                          // end index at which a cur_n-gram with gap
                          // cur_k + 1 still fits.
                          cur_f = (cur_n - 1) * (2 + cur_k);
                          ++cur_k;
                        }
                      else if (cur_n < spec.n &&
                               cur_n < f.size ())
                        {
                          // Move to (cur_n + 1)-grams with gap 0;
                          // smallest feasible end index is cur_n.
                          cur_f = cur_n;
                          ++cur_n;
                          cur_k = 0;
                        }
                      else
                        {
                          cur_f = f.size ();
                          cur_n = spec.n;
                          cur_k = spec.k;
                        }
                    }

                  return *this;
                }

              // Materialize the current n-gram as a single Feature:
              // member ids are folded together with hash_combine and
              // member weights are multiplied.  Earlier elements sit at
              // cur_f - (1 + cur_k) * n for n = 1..cur_n-1.
              const Feature*
              operator-> ()
                {
                  tmp = f[cur_f];

                  for (unsigned int n = 1; n < cur_n; ++n)
                    {
                      tmp.id = hash_combine (tmp.id,
                                             f[cur_f - (1 + cur_k) * n].id);

                      tmp.weight *= f[cur_f - (1 + cur_k) * n].weight;
                    }

                  return &tmp;
                }
          };

        // Accumulate the linear (non-dotproduct) portion of the score
        // into rv, restricted to features whose base hash slot lies in
        // [minindex, maxindex) -- callers can thereby process one slice
        // of the weight table at a time.  For each (skip-)n-gram
        // feature with hash id and weight w, class 'offset' probes slot
        // (id + offset) % num_weights: p and wdotx gain
        // w * weights[probe].value, xnorm gains w * w, and n counts the
        // feature.  Classes flagged rv[offset].skip are left untouched.
        void
        get_partial_linear (const IndexedFeatureSet*     f,
                            uint64_t                     minindex,
                            uint64_t                     maxindex,
                            std::vector<EstimateResult>& rv) const
          {
            if (f)
              {
                for (unsigned int ns = 0; ns < 256; ++ns)
                  {
                    // Dot-producted namespaces are evaluated elsewhere.
                    if ((*f)[ns] && ! is_dotproducted (ns))
                      {
                        NGramSpec spec = get_ngram_spec (ns);

                        for (NGramConstIterator y (*(*f)[ns], spec);
                             y != NGramConstIterator::end (*(*f)[ns], spec);
                             ++y)
                          {
                            uint64_t id = y->id;
                            uint64_t baseprobe = id % header->num_weights;
                            
                            if (baseprobe >= minindex && baseprobe < maxindex)
                              {
                                float w = y->weight;

                                for (uint64_t offset = 0; 
                                     offset < rv.size (); 
                                     ++offset)
                                  {
                                    if (! rv[offset].skip)
                                      {
                                        uint64_t probe = 
                                          (id + offset) % header->num_weights;

                                        rv[offset].p += 
                                          w * weights[probe].value;
                                        rv[offset].wdotx += 
                                          w * weights[probe].value;
                                        rv[offset].xnorm += w * w;
                                        ++rv[offset].n;
                                      }
                                  }
                              }
                          }
                      }
                  }
              }

            // The implicit constant feature for class 'offset' lives in
            // slot 'offset'; it is only added by the slice containing
            // the low indices (minindex == 0), so it is counted once.
            if (header->constant_feature && minindex == 0)
              {
                for (uint64_t offset = 0; offset < rv.size (); ++offset)
                  {
                    if (! rv[offset].skip)
                      {
                        rv[offset].p += weights[offset].value;
                        rv[offset].wdotx += weights[offset].value;
                        rv[offset].xnorm += 1.0f;
                        ++rv[offset].n;
                      }
                  }
              }
          }

        // Compute, per class offset, the adaptive norm contribution the
        // pending update WOULD produce: for each in-range linear
        // feature, wsq * rsqrtf (sum_sq_grad + gsq * wsq).  Works on a
        // local copy of sum_sq_grad, so the weight table is not
        // modified.  Classes with gsq <= 0 are skipped.
        void
        get_adaptive_norm_partial_linear (const IndexedFeatureSet* f,
                                          uint64_t                 minindex,
                                          uint64_t                 maxindex,
                                          std::vector<UpdateInfo>& info) const
          {
            if (f)
              {
                for (unsigned int ns = 0; ns < 256; ++ns)
                  {
                    // Dot-producted namespaces are handled elsewhere.
                    if ((*f)[ns] && ! is_dotproducted (ns))
                      {
                        NGramSpec spec = get_ngram_spec (ns);

                        for (NGramConstIterator y (*(*f)[ns], spec);
                             y != NGramConstIterator::end (*(*f)[ns], spec);
                             ++y)
                          {
                            uint64_t id = y->id;
                            uint64_t baseprobe = id % header->num_weights;

                            if (baseprobe >= minindex && baseprobe < maxindex)
                              {
                                float w = y->weight;
                                float wsq = w * w;

                                // wsq > 0 guard: zero-weight features
                                // contribute nothing.
                                for (uint64_t offset = 0;
                                     offset < info.size () && wsq > 0;
                                     ++offset)
                                  {
                                    if (info[offset].gsq > 0)
                                      {
                                        uint64_t probe = 
                                          (id + offset) % header->num_weights;

                                        // Hypothetical post-update
                                        // accumulator; weights[] is
                                        // deliberately left unchanged.
                                        float sum_sq_grad = 
                                          weights[probe].sum_sq_grad;

                                        sum_sq_grad += info[offset].gsq * wsq;

                                        info[offset].adaptivexnorm +=
                                          wsq * rsqrtf (sum_sq_grad);
                                      }
                                  }
                              }
                          }
                      }
                  }
              }

            // Constant feature (weight 1, slot == class offset); only
            // the slice with minindex == 0 accounts for it.
            if (header->constant_feature && minindex == 0)
              {
                for (uint64_t offset = 0; offset < info.size (); ++offset)
                  {
                    if (info[offset].gsq > 0)
                      {
                        float sum_sq_grad = weights[offset].sum_sq_grad;

                        sum_sq_grad += info[offset].gsq;

                        info[offset].adaptivexnorm += rsqrtf (sum_sq_grad);
                      }
                  }
              }
          }

        // Apply the per-class updates in 'info' to the weight table,
        // for linear features whose base slot lies in
        // [minindex, maxindex).  Each touched slot first accumulates
        // the squared gradient (gsq * wsq) into sum_sq_grad; the value
        // then steps by sh * w, additionally scaled by
        // rsqrtf (sum_sq_grad) in adaptive mode (AdaGrad-style
        // per-coordinate learning rate).  Classes with gsq <= 0 are
        // skipped.
        void
        update_partial_linear (const IndexedFeatureSet*       f,
                               const std::vector<UpdateInfo>& info,
                               uint64_t                       minindex,
                               uint64_t                       maxindex)
          {
            if (f)
              {
                for (unsigned int ns = 0; ns < 256; ++ns)
                  {
                    // Dot-producted namespaces are updated elsewhere.
                    if ((*f)[ns] && ! is_dotproducted (ns))
                      {
                        NGramSpec spec = get_ngram_spec (ns);

                        for (NGramConstIterator y (*(*f)[ns], spec);
                             y != NGramConstIterator::end (*(*f)[ns], spec);
                             ++y)
                          {
                            uint64_t id = y->id;
                            uint64_t baseprobe = id % header->num_weights;

                            if (baseprobe >= minindex && baseprobe < maxindex)
                              {
                                float w = y->weight;
                                float wsq = w * w;

                                // Zero-weight features change nothing.
                                for (uint64_t offset = 0;
                                     offset < info.size () && wsq > 0;
                                     ++offset)
                                  {
                                    if (info[offset].gsq > 0)
                                      {
                                        uint64_t probe = 
                                          (id + offset) % header->num_weights;

                                        // Accumulate BEFORE stepping so
                                        // the adaptive scale includes
                                        // this example's gradient.
                                        weights[probe].sum_sq_grad += 
                                          info[offset].gsq * wsq;

                                        if (info[offset].adaptive)
                                          {
                                            weights[probe].value -= 
                                              info[offset].sh * w * 
                                              rsqrtf (weights[probe].sum_sq_grad);
                                          }
                                        else
                                          {
                                            weights[probe].value -=
                                              info[offset].sh * w;
                                          }
                                      }
                                  }
                              }
                          }
                      }
                  }
              }

            // Constant feature (weight 1, slot == class offset); only
            // the slice with minindex == 0 updates it.
            if (header->constant_feature && minindex == 0)
              {
                for (uint64_t offset = 0; offset < info.size (); ++offset)
                  {
                    if (info[offset].gsq > 0)
                      {
                        weights[offset].sum_sq_grad += info[offset].gsq;

                        if (info[offset].adaptive)
                          {
                            weights[offset].value -= 
                              info[offset].sh * 
                              rsqrtf (weights[offset].sum_sq_grad);
                          }
                        else
                          {
                            weights[offset].value -= info[offset].sh;
                          }
                      }
                  }
              }
          }

        // Mix hash 'b' into hash 'a' (boost::hash_combine pattern with
        // a 64-bit golden-ratio constant).  Not commutative.
        static uint64_t
        hash_combine (uint64_t a,
                      uint64_t b)
          {
            const uint64_t golden = 0x9e3779b97f4a7c13ULL;

            uint64_t mixed = b;
            mixed += golden;
            mixed += b << 6;
            mixed += b >> 2;

            return a ^ mixed;
          }

        // Quadratic analogue of get_partial_linear: accumulates score
        // contributions for cross features of namespaces 'a' and 'b'.
        // Each pair of (skip-)n-grams (x, y) forms a feature with
        // id = hash_combine (x->id, y->id) and weight
        // w = x->weight * y->weight; slot selection, the
        // [minindex, maxindex) slice filter, and the per-class
        // accumulation mirror the linear case.
        void
        get_partial_quadratic (const IndexedFeatureSet*     f,
                               uint8_t                      a,
                               uint8_t                      b,
                               uint64_t                     minindex,
                               uint64_t                     maxindex,
                               std::vector<EstimateResult>& rv) const
          {
            if (f)
              {
                // Both namespaces must be present and neither may be
                // claimed by a dot-product interaction.
                if ((*f)[a] && (*f)[b] 
                    && ! is_dotproducted (a) && ! is_dotproducted (b))
                  {
                    NGramSpec spec_a = get_ngram_spec (a);
                    NGramSpec spec_b = get_ngram_spec (b);

                    for (NGramConstIterator x (*(*f)[a], spec_a);
                         x != NGramConstIterator::end (*(*f)[a], spec_a);
                         ++x)
                      {
                        for (NGramConstIterator y (*(*f)[b], spec_b);
                             y != NGramConstIterator::end (*(*f)[b], spec_b);
                             ++y)
                          {
                            uint64_t id = hash_combine (x->id, y->id);
                            uint64_t baseprobe = id % header->num_weights;

                            if (baseprobe >= minindex && baseprobe < maxindex)
                              {
                                float w = x->weight * y->weight;

                                for (uint64_t offset = 0; 
                                     offset < rv.size (); 
                                     ++offset)
                                  {
                                    if (! rv[offset].skip)
                                      {
                                        uint64_t probe = 
                                          (id + offset) % header->num_weights;
                                        
                                        rv[offset].p += 
                                          w * weights[probe].value;
                                        rv[offset].wdotx += 
                                          w * weights[probe].value;
                                        rv[offset].xnorm += w * w; 
                                        ++rv[offset].n;
                                      }
                                  }
                              }
                          }
                      }
                  }
              }
          }

        // Quadratic analogue of get_adaptive_norm_partial_linear:
        // accumulates, per class, wsq * rsqrtf (sum_sq_grad + gsq * wsq)
        // for in-range cross features of namespaces 'a' and 'b'.  Uses
        // a local copy of sum_sq_grad, so the weight table is not
        // modified.  Classes with gsq <= 0 are skipped.
        void
        get_adaptive_norm_partial_quadratic (const IndexedFeatureSet* f,
                                             uint8_t                  a,
                                             uint8_t                  b,
                                             uint64_t                 minindex,
                                             uint64_t                 maxindex,
                                             std::vector<UpdateInfo>& info) const
          {
            if (f)
              {
                // Both namespaces must be present and neither claimed
                // by a dot-product interaction.
                if ((*f)[a] && (*f)[b]
                    && ! is_dotproducted (a) && ! is_dotproducted (b))
                  {
                    NGramSpec spec_a = get_ngram_spec (a);
                    NGramSpec spec_b = get_ngram_spec (b);

                    for (NGramConstIterator x (*(*f)[a], spec_a);
                         x != NGramConstIterator::end (*(*f)[a], spec_a);
                         ++x)
                      {
                        for (NGramConstIterator y (*(*f)[b], spec_b);
                             y != NGramConstIterator::end (*(*f)[b], spec_b);
                             ++y)
                          {
                            uint64_t id = hash_combine (x->id, y->id);
                            uint64_t baseprobe = id % header->num_weights;

                            if (baseprobe >= minindex && baseprobe < maxindex)
                              {
                                float w = x->weight * y->weight;
                                float wsq = w * w;

                                // Zero-weight pairs contribute nothing.
                                for (uint64_t offset = 0; 
                                     offset < info.size () && wsq > 0;
                                     ++offset)
                                  {
                                    if (info[offset].gsq > 0)
                                      {
                                        uint64_t probe = 
                                          (id + offset) % header->num_weights;

                                        // Hypothetical post-update
                                        // accumulator; weights[] stays
                                        // unchanged here.
                                        float sum_sq_grad = 
                                          weights[probe].sum_sq_grad;

                                        sum_sq_grad += info[offset].gsq * wsq;

                                        info[offset].adaptivexnorm +=
                                          wsq * rsqrtf (sum_sq_grad);
                                      }
                                  }
                              }
                          }
                      }
                  }
              }
          }

        void
        update_partial_quadratic (const IndexedFeatureSet*       f,
                                  uint8_t                        a,
                                  uint8_t                        b,
                                  const std::vector<UpdateInfo>& info,
                                  uint64_t                       minindex,
                                  uint64_t                       maxindex)
          {
            if (f)
              {
                if ((*f)[a] && (*f)[b]
                    && ! is_dotproducted (a) && ! is_dotproducted (b))
                  {
                    NGramSpec spec_a = get_ngram_spec (a);
                    NGramSpec spec_b = get_ngram_spec (b);

                    for (NGramConstIterator x (*(*f)[a], spec_a);
                         x != NGramConstIterator::end (*(*f)[a], spec_a);
                         ++x)
                      {
                        for (NGramConstIterator y (*(*f)[b], spec_b);
                             y != NGramConstIterator::end (*(*f)[b], spec_b);
                             ++y)
                          {
                            uint64_t id = hash_combine (x->id, y->id);
                            uint64_t baseprobe = id % header->num_weights;

                            if (baseprobe >= minindex && baseprobe < maxindex)
                              {
                                float w = x->weight * y->weight;
                                float wsq = w * w;

                                for (uint64_t offset = 0; 
                                     offset < info.size () && wsq > 0;
                                     ++offset)
                                  {
                                    if (info[offset].gsq > 0)
                                      {
                                        uint64_t probe = 
                                          (id + offset) % header->num_weights;

                                        weights[probe].sum_sq_grad += 
                                          info[offset].gsq * wsq;

                                        if (info[offset].adaptive)
                                          {
                                            weights[probe].value -= 
                                              info[offset].sh * w * 
                                              rsqrtf (weights[probe].sum_sq_grad);
                                          }
                                        else
                                          {
                                            weights[probe].value -= 
                                              info[offset].sh * w;
                                          }
                                      }
                                  }
                              }
                          }
                      }
                  }
              }
          }

        // Convenience helper: returns x squared.
        static inline float
        square (float x)
          {
            const float xx = x * x;
            return xx;
          }

        // Deterministic lazy initializer for untouched weights.
        //
        // A stored weight of exactly zero is treated as
        // "uninitialized" and replaced by a value in [-0.25, 0.25)
        // derived from the feature id and probe offset through the
        // fractional part of a golden-ratio sequence; any nonzero
        // weight is returned unchanged.
        static float
        dyadic_init (float      v,
                     uint64_t   id,
                     uint64_t   offset)
          {
            static const double golden_ratio = 1.61803398874989485;

            if (v != 0.0f)
              {
                return v;
              }

            double delta =   static_cast<double> (id) / 
                             static_cast<double> (1ULL << 63)
                           + offset * golden_ratio;
            double delta_int;
            double delta_frac = modf (delta, &delta_int);

            // delta_frac is in [0, 1), so the result is in [-0.25, 0.25)
            return 0.5f * (delta_frac - 0.5f);
          }

        // Accumulate this thread's share of the rank-k dot-product
        // interaction between feature groups a and b into rv.
        //
        // The two groups are iterated in lockstep (x and y advance
        // together — pairs, not a cross product).  For each pair
        // whose x base probe lands in [minindex, maxindex), the
        // k-dimensional embedding dot product is added to every
        // non-skipped rv[offset]; weights that are still exactly
        // zero are lazily replaced by a deterministic dyadic_init
        // value for the purpose of this read.
        //
        // Fix: use fabsf, not fabs — the operands are float, and the
        // double overload only adds a silent promotion; also hoist
        // the repeated x_val * y_val product.
        void
        get_partial_dotproduct (const IndexedFeatureSet*     f,
                                uint8_t                      a,
                                uint8_t                      b,
                                uint8_t                      k,
                                uint64_t                     minindex,
                                uint64_t                     maxindex,
                                std::vector<EstimateResult>& rv) const
          {
            if (f)
              {
                if ((*f)[a] && (*f)[b] && k > 0)
                  {
                    NGramSpec spec_a = get_ngram_spec (a);
                    NGramSpec spec_b = get_ngram_spec (b);

                    for (NGramConstIterator x (*(*f)[a], spec_a),
                                            y (*(*f)[b], spec_b);
                         x != NGramConstIterator::end (*(*f)[a], spec_a) &&
                         y != NGramConstIterator::end (*(*f)[b], spec_b);
                         ++x, ++y)
                      {
                        const uint64_t x_id = x->id;
                        const float x_w = x->weight;
                        const uint64_t x_baseprobe = x_id % header->num_weights;

                        if (fabsf (x_w) > 0 && 
                            x_baseprobe >= minindex && 
                            x_baseprobe < maxindex)
                          {
                            const uint64_t y_id = y->id;
                            const float y_w = y->weight;

                            // rc tracks the dyadic_init stream position:
                            // two values (one per side) per embedding slot
                            for (uint64_t offset = 0, rc = 0;
                                 fabsf (y_w) > 0 && offset < rv.size (); 
                                 ++offset, rc += 2 * k)
                              {
                                if (! rv[offset].skip)
                                  {
                                    for (uint64_t z = 0; z < k; ++z)
                                      {
                                        const uint64_t x_probe = 
                                          (x_id + k * offset + z) % header->num_weights;
                                        const uint64_t y_probe = 
                                          (y_id + k * offset + z) % header->num_weights;

                                        const float x_val = 
                                          x_w * 
                                          dyadic_init (weights[x_probe].value,
                                                       x_id,
                                                       rc + 2 * z);

                                        const float y_val = 
                                          y_w * 
                                          dyadic_init (weights[y_probe].value,
                                                       y_id,
                                                       rc + 2 * z + 1);

                                        // reuse the product for both the
                                        // prediction and the diagnostics
                                        const float xy = x_val * y_val;

                                        rv[offset].p += xy;
                                        rv[offset].adotb += xy;
                                        rv[offset].anormplusbnorm +=
                                          square (x_val) + square (y_val);
                                        rv[offset].n += 2;
                                      }
                                  }
                              }
                          }
                      }
                  }
              }
          }

        // Horner evaluation of the truncated Taylor series of
        // a*cosh(s) + b*sinh(s): even-factorial coefficients multiply
        // a (cosh) and odd-factorial coefficients multiply b (sinh).
        // The degree 5..9 terms are only worth computing once |s|
        // exceeds 1e-2; below that the quartic tail suffices.
        static float
        hyperbolic_smalls (float a,
                           float b,
                           float s)
          {
            float acc = 0.0f;

            if (fabsf (s) > 1e-2)
              {
                acc = (b / 362880.0f) * s;
                acc = (acc + a / 40320.0f) * s;
                acc = (acc + b / 5040.0f) * s;
                acc = (acc + a / 720.f) * s;
                acc = (acc + b / 120.f) * s;
              }

            acc = (acc + a / 24.0f) * s;
            acc = (acc + b / 6.0f) * s;
            acc = (acc + a / 2.0f) * s;
            acc = (acc + b) * s;
            acc = acc + a;

            return acc;
          }

        // a*cosh(s) + b*sinh(s) for moderate |s|, evaluated the
        // straightforward way from expf(s) and its reciprocal.
        //
        // Fix: keep the reciprocal in float (1.0f, not 1.0) so the
        // expression is not silently promoted to double arithmetic.
        static float
        hyperbolic_mediums (float a,
                            float b,
                            float s)
          {
            // evaluate the straightforward way
            
            float exps = expf (s);
            float expminuss = 1.0f / exps;

            return 
              0.5f * a * (exps + expminuss) + 0.5f * b * (exps - expminuss);
          }

        // a*cosh(s) + b*sinh(s) for large |s|, evaluated in the log
        // domain to avoid overflow and catastrophic cancellation:
        //
        //   expf (s + log (|a+b|) - log (2)) * sign (a+b)
        // + expf (-s + log (|a-b|) - log (2)) * sign (a-b)
        //
        // with degenerate branches when a+b or a-b is exactly zero.
        //
        // Fix: use logf rather than log — the arguments are float,
        // and the double overload only adds a silent promotion.
        static float
        hyperbolic_larges (float a,
                           float b,
                           float s)
          {
            static const float logtwo = 0.69314718055994531f;

            float sum = a + b;
            float diff = a - b;

            float abssum = fabsf (sum);
            float absdiff = fabsf (diff);

            if (abssum > 0 && absdiff > 0)
              {
                float logabssum = logf (abssum);
                float logabsdiff = logf (absdiff);
                float signsum = (sum > 0) ? 1 : -1;
                float signdiff = (diff > 0) ? 1 : -1;

                return expf (s + logabssum - logtwo) * signsum
                     + expf (-s + logabsdiff - logtwo) * signdiff;
              }
            else if (abssum > 0)
              {
                float logabssum = logf (abssum);
                float signsum = (sum > 0) ? 1 : -1;

                return expf (s + logabssum - logtwo) * signsum;
              }
            else if (absdiff > 0)
              {
                float logabsdiff = logf (absdiff);
                float signdiff = (diff > 0) ? 1 : -1;

                return expf (-s + logabsdiff - logtwo) * signdiff;
              }
            else // everything is zero to machine precision?
              {
                return a;
              }
          }

        // Dispatch a*cosh(s) + b*sinh(s) to the evaluation strategy
        // suited to the magnitude of s: Taylor series for tiny s,
        // direct expf for moderate s, and a log-domain form that
        // resists overflow/cancellation for large |s|.
        static float
        hyperbolic_safe (float a,
                         float b,
                         float s)
          {
            // a <- a cosh (s) + b sinh (s)

            const float abs_s = fabsf (s);

            if (abs_s < 1e-1)
              {
                return hyperbolic_smalls (a, b, s);
              }

            if (abs_s < 3)
              {
                return hyperbolic_mediums (a, b, s);
              }

            return hyperbolic_larges (a, b, s);
          }

        // Hyperbolic rotation of a coupled weight pair:
        //
        //   a <- (xa*a cosh (-sh) + xb*b sinh (-sh)) * decay / xa
        //   b <- (xb*b cosh (-sh) + xa*a sinh (-sh)) * decay / xb
        //
        // Both new values are computed from the old weights before
        // either one is overwritten.
        static void
        hyperbolic_update (Weight*  a,
                           float    xa,
                           Weight*  b,
                           float    xb,
                           float    sh,
                           float    decay)
          {
            const float va = xa * a->value;
            const float vb = xb * b->value;

            const float a_new = decay * hyperbolic_safe (va, vb, -sh);
            const float b_new = decay * hyperbolic_safe (vb, va, -sh);

            a->value = a_new / xa;
            b->value = b_new / xb;
          }

        // Apply this thread's share of the rank-k dot-product update
        // between feature groups a and b: each paired weight is
        // lazily initialized via dyadic_init, then the pair is
        // rotated hyperbolically by the per-offset step sh with
        // multiplicative decay.  Only pairs whose x base probe lands
        // in [minindex, maxindex) are touched.
        //
        // NOTE(review): the gates here are x_w > 0 and y_w > 0,
        // whereas get_partial_dotproduct gates on fabs (x_w) > 0 —
        // negatively-weighted features contribute to the prediction
        // but would never be updated here.  Confirm this asymmetry
        // is intentional.
        void
        update_partial_dotproduct (const IndexedFeatureSet*     f,
                                   uint8_t                      a,
                                   uint8_t                      b,
                                   uint8_t                      k,
                                   const std::vector<UpdateInfo>& info,
                                   uint64_t                     minindex,
                                   uint64_t                     maxindex)
          {
            if (f)
              {
                if ((*f)[a] && (*f)[b] && k > 0)
                  {
                    NGramSpec spec_a = get_ngram_spec (a);
                    NGramSpec spec_b = get_ngram_spec (b);

                    // lockstep iteration: pairs (x_i, y_i), not a
                    // cross product of the two groups
                    for (NGramConstIterator x (*(*f)[a], spec_a),
                                            y (*(*f)[b], spec_b);
                         x != NGramConstIterator::end (*(*f)[a], spec_a) &&
                         y != NGramConstIterator::end (*(*f)[b], spec_b);
                         ++x, ++y)
                      {
                        const uint64_t x_id = x->id;
                        const float x_w = x->weight;
                        const uint64_t x_baseprobe = x_id % header->num_weights;

                        if (x_w > 0 && 
                            x_baseprobe >= minindex && 
                            x_baseprobe < maxindex)
                          {
                            const uint64_t y_id = y->id;
                            const float y_w = y->weight;

                            // rc tracks the dyadic_init stream position:
                            // two values (one per side) per embedding slot
                            for (uint64_t offset = 0, rc = 0;
                                 y_w > 0 && offset < info.size (); 
                                 ++offset, rc += 2 * k)
                              {
                                if (info[offset].gsq > 0)
                                  {
                                    for (uint64_t z = 0; z < k; ++z)
                                      {
                                        const uint64_t x_probe = 
                                          (x_id + k * offset + z) % header->num_weights;
                                        const uint64_t y_probe = 
                                          (y_id + k * offset + z) % header->num_weights;

                                        // replace still-zero weights with
                                        // their deterministic initial value
                                        // before rotating
                                        weights[x_probe].value = 
                                          dyadic_init (weights[x_probe].value,
                                                       x_id,
                                                       rc + 2 * z);

                                        weights[y_probe].value = 
                                          dyadic_init (weights[y_probe].value,
                                                       y_id,
                                                       rc + 2 * z + 1);

                                        hyperbolic_update (weights + x_probe,
                                                           x_w,
                                                           weights + y_probe,
                                                           y_w,
                                                           info[offset].sh,
                                                           info[offset].decay);
                                      }
                                  }
                              }
                          }
                      }
                  }
              }
          }

      public:
        // Open (or create) the file backing this regressor.
        //
        // params.pathname is opened read-only or read-write (with
        // O_CREAT) according to params.is_read_only.  A zero-length
        // file is sized to header.file_size () and seeded with the
        // supplied header.  Any failure throws std::runtime_error
        // carrying strerror (errno), closing the descriptor first so
        // nothing leaks.
        //
        // NOTE(review): the create mode includes S_IWGRP | S_IWOTH,
        // i.e. the file is group- and world-writable — confirm this
        // is intended.
        Regressor (const Open& params)
          {
            fd = open (params.pathname.c_str (),
                       params.is_read_only ? O_RDONLY : (O_RDWR | O_CREAT),
                       S_IRUSR | S_IWUSR | 
                       S_IRGRP | S_IWGRP | 
                       S_IROTH | S_IWOTH);

            if (fd < 0)
              {
                throw std::runtime_error (strerror (errno));
              }
            try
              {
                // a zero-length file means we just created it
                off_t end = lseek (fd, 0, SEEK_END);
                if (end == (off_t) -1)
                  {
                    throw std::runtime_error (strerror (errno));
                  }
                else if (end == 0)
                  {
                    // size the file and write the initial header;
                    // a short write counts as failure
                    if (ftruncate (fd, params.header.file_size ()) < 0 ||
                        lseek (fd, 0, SEEK_SET) == (off_t) -1 ||
                        write (fd,
                               &params.header,
                               sizeof (params.header)) < 
                        (ssize_t) sizeof (params.header))
                      {
                        throw std::runtime_error (strerror (errno));
                      }
                  }

                // presumably maps the file and sets up the header /
                // weights pointers (see munmap in ~Regressor) — the
                // helper's definition is outside this chunk
                initialize_db_from_fd (params.is_read_only);
              }
            catch (...)
              {
                // don't leak the descriptor on failed construction
                close (fd);
                throw;
              }
          }

        // Read-only reference to the memory-mapped file header.
        const Header&
        get_header () const
          {
            const Header& h = *header;
            return h;
          }

        // Accumulate this thread's partial estimate for feature set
        // f into rv: linear terms, then every configured quadratic
        // and dot-product interaction.  Each of num_threads callers
        // owns the contiguous weight-index slice
        // [minindex, maxindex).
        //
        // Fix: the slice width is now computed with integer ceiling
        // division; the previous float-based ceil loses precision
        // once num_weights exceeds 2^24 and can mis-tile the weight
        // table across threads (leaving indices uncovered).
        void
        get_partial (const IndexedFeatureSet*     f,
                     uint64_t                     thread_no,
                     uint64_t                     num_threads,
                     std::vector<EstimateResult>& rv) const
          {
            uint64_t modulus = 
              (header->num_weights + num_threads - 1) / num_threads;

            uint64_t minindex = thread_no * modulus;
            uint64_t maxindex = (1 + thread_no) * modulus;

            get_partial_linear (f, minindex, maxindex, rv);

            for (unsigned int n = 0; n < header->num_quadratic; ++n)
              {
                get_partial_quadratic (f,
                                       header->quadratic[n].first,
                                       header->quadratic[n].second,
                                       minindex,
                                       maxindex,
                                       rv);
              }

            for (unsigned int n = 0; n < header->num_dotproduct; ++n)
              {
                get_partial_dotproduct (f,
                                        header->dotproduct[n].a,
                                        header->dotproduct[n].b,
                                        header->dotproduct[n].k,
                                        minindex,
                                        maxindex,
                                        rv);
              }
          }

        // Accumulate this thread's share of the adaptive-gradient
        // norm for feature set f into rv (linear plus quadratic
        // terms; dot-product participation is an open question, see
        // the retained note below).
        //
        // Fix: slice width via integer ceiling division instead of
        // float ceil, which loses precision for num_weights > 2^24
        // and can mis-tile the weight table across threads.
        void
        get_adaptive_norm_partial (const IndexedFeatureSet* f,
                                   uint64_t                 thread_no,
                                   uint64_t                 num_threads,
                                   std::vector<UpdateInfo>& rv) const
          {
            uint64_t modulus = 
              (header->num_weights + num_threads - 1) / num_threads;

            uint64_t minindex = thread_no * modulus;
            uint64_t maxindex = (1 + thread_no) * modulus;

            get_adaptive_norm_partial_linear (f, minindex, maxindex, rv);

            for (unsigned int n = 0; n < header->num_quadratic; ++n)
              {
                get_adaptive_norm_partial_quadratic 
                  (f,
                   header->quadratic[n].first,
                   header->quadratic[n].second,
                   minindex,
                   maxindex,
                   rv);
              }

            // uh ... not sure how the dotproduct stuff participates
            // in the adaptive norm ... maybe if i figured that out
            // i wouldn't need separate regularization?
            //
            // for (unsigned int n = 0; n < header->num_dotproduct; ++n)
            //   {
            //     get_adaptive_norm_partial_dotproduct 
            //       (f,
            //        header->dotproduct[n].a,
            //        header->dotproduct[n].b,
            //        header->dotproduct[n].k,
            //        minindex,
            //        maxindex,
            //        rv);
            //   }
          }

        // Apply the accumulated per-offset update info to this
        // thread's weight slice [minindex, maxindex): linear terms
        // first, then every configured quadratic and dot-product
        // interaction.
        //
        // Fix: slice width via integer ceiling division instead of
        // float ceil, which loses precision for num_weights > 2^24
        // and can mis-tile the weight table across threads.
        void
        update_partial (const IndexedFeatureSet*        f,
                        const std::vector<UpdateInfo>&  info,
                        uint64_t                        thread_no,
                        uint64_t                        num_threads)
          {
            uint64_t modulus = 
              (header->num_weights + num_threads - 1) / num_threads;

            uint64_t minindex = thread_no * modulus;
            uint64_t maxindex = (1 + thread_no) * modulus;

            update_partial_linear (f, info, minindex, maxindex);

            for (unsigned int n = 0; n < header->num_quadratic; ++n)
              {
                update_partial_quadratic (f,
                                          header->quadratic[n].first,
                                          header->quadratic[n].second,
                                          info,
                                          minindex,
                                          maxindex);
              }

            for (unsigned int n = 0; n < header->num_dotproduct; ++n)
              {
                update_partial_dotproduct (f,
                                           header->dotproduct[n].a,
                                           header->dotproduct[n].b,
                                           header->dotproduct[n].k,
                                           info,
                                           minindex,
                                           maxindex);
              }
          }

        // Schedule an asynchronous flush of the memory-mapped weight
        // file, unmap it, and release the file descriptor.  Errors
        // from msync / munmap / close are deliberately ignored:
        // destructors must not throw.
        ~Regressor ()
          {
            msync (base, header->file_size (), MS_ASYNC);
            munmap (base, header->file_size ());
            close (fd);
          }
    };
}

#endif // __REGRESSOR_HH__
