/*
 *  Copyright 2012-2013 APEXLab, Shanghai Jiao Tong University and Huawei Noah's Ark Lab
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */
/*!
 * \file gfmf_singlevar_cut.h
 * \brief This file implements the user time-dependent general functional matrix factorization,
 *        we specially implement this algorithm to optimize the memory and time cost of the algorithm.
 * 
 *  Acknowledgment: This work is a joint work of Shanghai Jiao Tong University and Huawei Noah's Ark Lab
 *
 * \author Tianqi Chen: tqchen@apex.sjtu.edu.cn
 */

// implementation of single variable cutting algorithm, using greedy strategy
#ifndef _GFMF_SINGLEVAR_CUT_H_
#define _GFMF_SINGLEVAR_CUT_H_

#include <climits>
#include <cstdlib>
#include <cstring>
#include <algorithm>
#include <vector>
#include "../../apex-tensor/apex_random.h"

namespace gfmf{
    // compile-time switch: set to true to enable extra bug-checking code paths
    const bool check_bug = false;

    // tolerance used when comparing floating point feature values
    const float rt_eps = 1e-5f;
    // twice rt_eps: two values closer than this are treated as the same cut point
    const float rt_2eps = rt_eps * 2.0f;
    /*! \brief return the square of a */
    inline double sqr( double a ){
        return a * a;
    }
}

namespace gfmf{    
    // training parameter for reg tree
    // training parameter for reg tree
    struct SingleVarRTParamTrain{
        // learning step size for a time
        float learning_rate;
        // minimum loss change required for a split
        float min_split_loss;
        // minimum amount of weight (sum of second order gradient) allowed in a child
        float min_child_weight;
        // merge flag of data: decimal digit switches read by ComboSingleVarCutMerger;
        // ones digit != 0 enables the min-weight merger, tens digit != 0 enables the greedy merger
        int   merge_flag;
        // maximum number of cuts (intervals) kept by the greedy merger
        int   max_piece;
        // regularization method used for model control (constructor sets 2 as the default)
        // 0: R = min_split_loss |T| -- control the number of leaves
        // 1: R = min_split_loss |T| + reg_lambda \sum_i |w_i| -- control the number of leaves, L1 regularization to each leaf weight
        // 2: L2 regularization of each leaf weight (reg_lambda added to the hessian)
        // 3: elastic net, L1 and L2 each weighted by 0.5 * reg_lambda
        // 4: L2 regularization with the center shifted to mean_weight
        int   reg_method;
        // regularization parameter 
        float reg_lambda;
        // used as temp variable, mean value of current weight (only read when reg_method == 4)
        float mean_weight;

        // set the default training parameters
        SingleVarRTParamTrain( void ){
            learning_rate = 0.3f;
            min_split_loss   = 1.0f;
            min_child_weight = 1.0f;
            max_piece = 64;
            merge_flag = 11;
            reg_method = 2;
            reg_lambda = 0.0f;
            mean_weight = 0.0f;
        }
        // set a parameter from a name/value string pair; unknown names are ignored
        virtual void set_param( const char *name, const char *val ){
            if( !strcmp( name, "learning_rate") )     learning_rate = (float)atof( val );
            if( !strcmp( name, "min_split_loss") )    min_split_loss = (float)atof( val );
            // NOTE(review): stored value is a quarter of the user-supplied value --
            // presumably paired with the 0.25 scaling in SCStatList::calc_merge_cost; confirm intended
            if( !strcmp( name, "min_child_weight") )  min_child_weight = (float)atof( val ) / 4.0;
            if( !strcmp( name, "max_piece") )      max_piece = atoi( val );
            if( !strcmp( name, "merge_flag") )     merge_flag = atoi( val );
            if( !strcmp( name, "reg_method") )     reg_method = atoi( val );
            if( !strcmp( name, "reg_lambda") )     reg_lambda = (float)atof( val );
        }

        // soft-thresholding operator used by the L1 penalty:
        // shrinks w toward zero by lambda, clamping to 0 inside [-lambda, +lambda]
        static inline double threshold_L1( double w, double lambda ){
            if( w > +lambda ) return w - lambda;
            if( w < -lambda ) return w + lambda;
            return 0.0;
        }
        // calculate the cost (loss-reduction score) of a leaf with the given
        // sum of gradient and sum of second order gradient, under reg_method
        inline double calc_cost( double sum_grad, double sum_sgrad ) const{
            switch( reg_method ){
            case 1 : return sqr( threshold_L1( sum_grad, reg_lambda ) ) / sum_sgrad;
            case 2 : return sqr( sum_grad ) / ( sum_sgrad + reg_lambda );
                // elastic net
            case 3 : return sqr( threshold_L1( sum_grad, 0.5 * reg_lambda ) ) / ( sum_sgrad + 0.5 * reg_lambda );
                // L2 reg with shifted center
            case 4 : return sqr( sum_grad + reg_lambda * mean_weight ) / ( sum_sgrad + reg_lambda );
            default: return sqr( sum_grad ) / sum_sgrad;
            }        
        }
        // calculate the (learning-rate scaled) optimal leaf weight for the given
        // gradient statistics; returns 0 when sum_sgrad is below min_child_weight
        inline double calc_weight( double sum_grad, double sum_sgrad )const{
            if( sum_sgrad < min_child_weight ){
                return 0.0;
            }else{
                apex_utils::assert_true( sum_sgrad > 1e-5, "second order derivative too low" );
                switch( reg_method ){
                case 1: return - learning_rate * SingleVarRTParamTrain::threshold_L1( sum_grad, reg_lambda ) / sum_sgrad;
                case 2: return - learning_rate * sum_grad / ( sum_sgrad + reg_lambda );
                    // elastic net
                case 3: return - learning_rate * SingleVarRTParamTrain::threshold_L1( sum_grad, 0.5 * reg_lambda ) / ( sum_sgrad + 0.5 * reg_lambda );
                case 4: return - learning_rate * ( sum_grad + reg_lambda * mean_weight )/ ( sum_sgrad + reg_lambda );
                default: return - learning_rate * sum_grad / sum_sgrad;
                }
            }
        }
    };
        
    // data structure to store training information
    // data structure to store training information of candidate intervals.
    // Intervals form a doubly linked list stored by index inside `nodes`;
    // node 0 is a sentinel head that never holds real statistics.
    class SCStatList{
    public:
        // the storage type of state variable
        typedef double sc_float;
        // type of each state 
        struct Node{
            // index of previous and next node in the list, -1 means none
            int prev, next;
            // begin feature value of current entry
            float begin_value;
            // sum of gradient and second order gradient in this interval
            sc_float sum_grad, sum_sgrad;
        };
    private:
        const SingleVarRTParamTrain &param;
        // number of real (non-deleted) intervals in the list
        size_t num_inst;
        std::vector<Node> nodes;
    public:
        SCStatList( const SingleVarRTParamTrain &pparam ):param(pparam){
            this->clear();
        }
        // reset the list to contain only the sentinel head node
        inline void clear( void ){
            nodes.clear();
            nodes.resize( 1 );
            nodes[0].prev = nodes[0].next = -1;
            // initialize sentinel fields so make_split_middle never reads
            // an uninitialized begin_value from the head node
            nodes[0].begin_value = 0.0f;
            nodes[0].sum_grad = nodes[0].sum_sgrad = 0.0;
            num_inst = 0;
        }
        // append statistics for one feature value; calls must arrive sorted by
        // begin_value, and values within rt_2eps are accumulated into one interval
        inline void push_back( float begin_value, sc_float sum_grad, sc_float sum_sgrad ){
            if( nodes.size() == 1 || begin_value - nodes.back().begin_value > rt_2eps ){
                Node n;
                n.begin_value = begin_value; 
                n.sum_grad    = sum_grad;
                n.sum_sgrad   = sum_sgrad;
                n.next = -1;
                // link the new node after the current tail
                // (nodes always holds at least the sentinel, so this branch is taken)
                if( nodes.size() != 0 ){
                    nodes.back().next = (int)nodes.size();
                    n.prev = (int)(nodes.size() - 1);
                }else{
                    n.prev = -1;
                }
                nodes.push_back( n );
                num_inst ++;
            }else{
                apex_utils::assert_true( begin_value - nodes.back().begin_value > -rt_2eps, "push to SC List must be sorted" );
                Node &n = nodes.back();
                n.sum_grad += sum_grad;
                n.sum_sgrad += sum_sgrad;
            }
        }
        // after inserting all data, move each begin_value to the midpoint
        // between adjacent original feature values, making middle split points
        inline void make_split_middle( void ){
            float vtail = nodes[0].begin_value;
            for( size_t i = 1; i < nodes.size(); i ++ ){
                float bk = nodes[i].begin_value;
                nodes[i].begin_value = 0.5f * (bk + vtail);
                vtail = bk;
            }
        }
        // number of intervals currently in the list
        inline size_t num_interval( void ) const{
            return num_inst;
        }
        // cost of merging two adjacent nodes: loss of the separate leaves minus
        // the loss of the combined leaf, scaled by 0.25
        inline double calc_merge_cost( int left, int right ) const{            
            // BUGFIX: the combined hessian previously summed the right node twice
            // (right.sum_sgrad + right.sum_sgrad) instead of left + right
            return 
                ( param.calc_cost( nodes[ left  ].sum_grad, nodes[ left ].sum_sgrad ) +
                  param.calc_cost( nodes[ right ].sum_grad, nodes[ right ].sum_sgrad ) -
                  param.calc_cost( nodes[ left  ].sum_grad  + nodes[ right ].sum_grad,
                                   nodes[ left  ].sum_sgrad + nodes[ right ].sum_sgrad ) ) * 0.25;
            
        }
        // merge this node with its previous node, deleting the previous node
        inline void merge_with_prev( int cid ){
            int pid = nodes[ cid ].prev;
            apex_utils::assert_true( pid != 0 && pid != -1, "must have previous node to merge" );
            // update pointer info
            nodes[ cid ].prev = nodes[ pid ].prev;
            if( nodes[ pid ].prev != -1 ){
                nodes[ nodes[ pid ].prev ].next = cid;
            }
            // accumulate statistics; the merged interval starts where prev started
            nodes[ cid ].sum_grad  += nodes[ pid ].sum_grad;
            nodes[ cid ].sum_sgrad += nodes[ pid ].sum_sgrad;
            nodes[ cid ].begin_value = nodes[ pid ].begin_value;
            // mark delete: prev == next == -1 is the deleted marker (see is_deleted)
            nodes[ pid ].prev = nodes[ pid ].next = -1;
            // update stats
            num_inst --;
        }
        // merge this node with its next node, deleting the next node
        inline void merge_with_next( int cid ){
            int nid = nodes[ cid ].next;
            apex_utils::assert_true( nid != -1, "must have next node to merge" );
            // update pointer info
            nodes[ cid ].next = nodes[ nid ].next;
            if( nodes[ nid ].next != -1 ){
                nodes[ nodes[ nid ].next ].prev = cid;
            }
            // accumulate statistics; begin_value stays at cid's start
            nodes[ cid ].sum_grad  += nodes[ nid ].sum_grad;
            nodes[ cid ].sum_sgrad += nodes[ nid ].sum_sgrad;
            // mark delete: prev == next == -1 is the deleted marker (see is_deleted)
            nodes[ nid ].prev = nodes[ nid ].next = -1;      
            // update stats
            num_inst --;
        }
        // whether the given node has been deleted by a merge
        inline bool is_deleted( int nid ) const{
            return nodes[ nid ].prev == -1 && nodes[ nid ].next == -1;
        }
        // constant view of node
        inline const Node &operator[]( int nid ) const{
            return nodes[ nid ];
        }
    };
};

namespace gfmf{
    // forward declaration: the full definition appears earlier in this header
    class SCStatList;
    /*!
     * \brief interface of an interval merger: merges adjacent intervals of the
     *        statistics list in place according to a concrete strategy
     */
    class ISingleVarCutMerger{
    public:
        // virtual destructor: required so deleting a concrete merger through a
        // base pointer is well-defined
        virtual ~ISingleVarCutMerger( void ){}
        /*! \brief merge adjacent intervals in dlist */
        virtual void do_merge( SCStatList &dlist ) = 0;
    };  
};

namespace gfmf{
    // Pruning merger that merges intervals which do not satisfy the minimum
    // weight condition (sum_sgrad < min_child_weight)
    // BUGFIX: inheritance was private (class default), which made the class
    // unusable through the ISingleVarCutMerger interface; made public
    class SingleVarMinMerger: public ISingleVarCutMerger{
    private:
        // fixed-capacity circular FIFO of node ids awaiting processing
        class Queue{
        private:
            int head, tail;
            std::vector<int> queue;
        public:
            Queue( size_t sz ){
                // one spare slot so that the full and empty states differ
                queue.resize( sz + 1 );
                head = tail = 0;
            }
            // enqueue a node id
            inline void in( int a ){
                queue[ tail ] = a;
                tail = ( tail + 1 ) % queue.size();
            }
            // dequeue and return the oldest node id
            inline int out( void ){
                int a = queue[ head ];
                head = ( head + 1 ) % queue.size();
                return a;
            }
            inline bool is_empty( void ) const{
                return head == tail;
            } 
        };
    private:
        const SingleVarRTParamTrain &param;
    public:
        SingleVarMinMerger( const SingleVarRTParamTrain &p ):param(p){}
        // repeatedly merge under-weight intervals with the cheaper of their two
        // neighbors until every remaining interval meets min_child_weight
        virtual void do_merge( SCStatList &dlist ){
            // initialize queue with all intervals below the weight threshold
            Queue queue( dlist.num_interval() );
            for( int cid = dlist[0].next; cid != -1; cid = dlist[cid].next ){
                if( dlist[ cid ].sum_sgrad < param.min_child_weight ){
                    queue.in( cid );
                }
            }
            // start merging
            while( !queue.is_empty() ){
                int cid = queue.out();
                // skip nodes already consumed by an earlier merge
                if( dlist.is_deleted( cid ) ) continue;

                if( dlist[ cid ].prev == 0 ){
                    // first real interval: can only merge to the right
                    if( dlist[ cid ].next != -1 ) dlist.merge_with_next( cid );
                    else continue;
                }else{
                    if( dlist[ cid ].next == -1 ){
                        // last interval: can only merge to the left
                        dlist.merge_with_prev( cid );
                    }else{
                        // pick the neighbor whose merge costs less
                        if( dlist.calc_merge_cost( cid, dlist[cid].prev ) < dlist.calc_merge_cost( cid, dlist[cid].next ) ){
                            dlist.merge_with_prev( cid );
                        }else{
                            dlist.merge_with_next( cid );
                        }
                    }
                }
                // re-enqueue if the merged interval is still under-weight
                if( dlist[ cid ].sum_sgrad < param.min_child_weight ) queue.in( cid ); 
            }
        }
    };
};

namespace gfmf{
    // Pruning merger that greedily merges the pair of adjacent intervals with
    // minimal cost until the stopping criteria are met
    // BUGFIX: inheritance was private (class default), which made the class
    // unusable through the ISingleVarCutMerger interface; made public
    class SingleVarGreedyMerger: public ISingleVarCutMerger{
    private:
        // candidate merge of two adjacent nodes together with its cached cost
        struct MEntry{
            int left, right;
            double cost;
            MEntry( int l, int r, double c ):left(l), right(r), cost(c){}
            // inverted comparison so the std:: heap functions give a min-heap on cost
            inline bool operator<( const MEntry &p ) const{
                return cost > p.cost;
            }
        };
    private:
        const SingleVarRTParamTrain &param;
    public:
        SingleVarGreedyMerger( const SingleVarRTParamTrain &p ):param(p){}        

        // merge cheapest adjacent pairs while there are too many intervals
        // (> max_piece) or the cheapest merge costs less than min_split_loss
        virtual void do_merge( SCStatList &dlist ){
            std::vector<MEntry> data;
            apex_utils::assert_true( dlist[0].next != -1, "can't merge empty list" );
            // initialize one candidate entry per adjacent pair
            for( int cid = dlist[0].next; dlist[cid].next != -1; cid = dlist[cid].next ){
                data.push_back( MEntry( cid, dlist[cid].next, dlist.calc_merge_cost( cid, dlist[cid].next ) ) );
            }
            // make heap
            std::make_heap( data.begin(), data.end() );
            
            // BUGFIX: guard the whole condition on data being non-empty; the old
            // condition could pop from an empty heap when num_interval > max_piece
            while( data.size() > 0 &&
                   ( dlist.num_interval() > (size_t)param.max_piece 
                     || data.front().cost < param.min_split_loss ) ){
                // pop the cheapest candidate
                std::pop_heap( data.begin(), data.end() );
                MEntry e = data.back(); data.pop_back();
                // skip stale entries whose nodes were consumed by earlier merges
                if( dlist.is_deleted( e.left ) || dlist.is_deleted( e.right ) ) continue;
                
                apex_utils::assert_true( dlist[ e.left ].next == e.right, "must match, BUG" );                            
                dlist.merge_with_next( e.left );

                // push fresh candidates for the two new adjacencies of the merged node
                const int cid = e.left;
                if( dlist[ cid ].prev != 0 ){
                    data.push_back( MEntry( dlist[cid].prev, cid, dlist.calc_merge_cost( dlist[cid].prev, cid ) ) ); 
                    std::push_heap( data.begin(), data.end() );
                }                
                if( dlist[ cid ].next != -1 ) {
                    data.push_back( MEntry( cid, dlist[cid].next, dlist.calc_merge_cost( cid, dlist[cid].next ) ) ); 
                    std::push_heap( data.begin(), data.end() );                    
                }
            }             
        }    
    };
};

namespace gfmf{
    // a combo merger that dispatches to different merging strategies, selected
    // by the decimal digits of param.merge_flag
    class ComboSingleVarCutMerger{
    private:
        SingleVarMinMerger minmerger;
        SingleVarGreedyMerger greedymerger;
        const SingleVarRTParamTrain &param;
    public:
        ComboSingleVarCutMerger( const SingleVarRTParamTrain &pparam ):
            minmerger( pparam ), greedymerger( pparam ),
            param( pparam ){            
        }
        // run the enabled mergers: the ones digit of merge_flag switches the
        // min-weight merger, the tens digit switches the greedy merger
        inline void do_merge( SCStatList &dlist ){
            const int flag = param.merge_flag;
            const bool run_min    = ( flag % 10 ) != 0;
            const bool run_greedy = ( ( flag / 10 ) % 10 ) != 0;
            if( run_min )    minmerger.do_merge( dlist );
            if( run_greedy ) greedymerger.do_merge( dlist );
        }
    };
};

#endif
