/*
 *  Copyright 2012-2013 APEX Data & Knowledge Management Lab, Shanghai Jiao Tong University
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */
/*!
 * \file apex_gradboost.h
 * \brief SVDFeature Wrapper for gradient boosters
 * \author Tianqi Chen: tqchen@apex.sjtu.edu.cn
 */
#ifndef _APEX_GRADBOOST_H_
#define _APEX_GRADBOOST_H_

#include "apex_booster.h"
#include "../../apex-utils/apex_config.h"

namespace apex_booster{    
    // extra training param for gradient boosting machine
    // extra training param for gradient boosting machine
    struct GBMTrainParam{
    public:
        /*! \brief learning rate */
        float learning_rate;
        /*! \brief whether to use result buffer during training */
        int use_res_buf;
        /*! \brief whether we will keep use one booster */        
        int do_reboost;
        GBMTrainParam( void ){
            learning_rate = 0.01f; use_res_buf = 1; do_reboost = 0;
        }
        /*!
         * \brief set parameter from a name/value string pair; unknown names are ignored
         * \param name parameter name
         * \param val  parameter value in textual form
         */
        inline void set_param( const char *name, const char *val ){
            if( !strcmp("learning_rate" , name ) ) learning_rate = (float)atof( val );
            // fix: these are integer flags — parse with atoi instead of the
            // old (float)atof(val) round-trip through float
            if( !strcmp("use_res_buf" , name ) ) use_res_buf = atoi( val );
            if( !strcmp("do_reboost" , name ) ) do_reboost = atoi( val );
        }
    };

    // model parameter for Gradient Boosting Machine
    // note: this struct is written to / read from disk as raw bytes,
    // so the field order and sizes define the on-disk model format
    struct GBMModelParam{
        // global bias added to every prediction
        float base_score;
        // number of boosters trained so far
        int num_boosters;
        // baseline predictor switch: 0 = boost from 0, otherwise boost from first global feature
        int baseline_mode;
        // which booster implementation to create
        int booster_type;
        // number of items; each item owns one root of the forest
        int num_item;
        // number of features used by the GBM
        int num_feature;
        // padding reserved for future format extensions
        int reserved[ 32 ];
        // constructor: zero out every field
        GBMModelParam( void ){
            base_score = 0.0f;
            num_boosters = baseline_mode = booster_type = 0;
            num_item = 0;
            num_feature = 0;
            memset( reserved, 0, sizeof( reserved ) );
        }
        // assign a parameter from a name/value string pair; unmatched names are ignored
        inline void set_param( const char *name, const char *val ){
            if( !strcmp( name, "base_score"       ) ) base_score    = (float)atof( val );
            if( !strcmp( name, "booster_baseline" ) ) baseline_mode = atoi( val );
            if( !strcmp( name, "booster_type"     ) ) booster_type  = atoi( val );
            if( !strcmp( name, "num_item"         ) ) num_item      = atoi( val );
            if( !strcmp( name, "num_feature"      ) ) num_feature   = atoi( val );
        }
    };

    /*! \brief model of gradient boosting machine */
    struct GBMModel{
        /*! \brief type of the solver */
        apex_svd::SVDTypeParam  mtype;
        /*! \brief model parameters */ 
        GBMModelParam param;
        /*! \brief component trees */ 
        std::vector<apex_booster::IBooster*> boosters;
        /*! \brief type of root weight, indicate auxiliary information of booster, such as which class this booster will add to in multi-class boosting*/ 
        std::vector<int> weight_type;
        /*! \brief free space of the model */
        inline void free_space( void ){
            for( size_t i = 0; i < boosters.size(); i ++ ){
                delete boosters[i];
            }
            boosters.clear(); weight_type.clear(); param.num_boosters = 0; 
        }
        inline void load_from_file( FILE *fi ){
            if( boosters.size() != 0 ) this->free_space();
            if( fread( &param, sizeof(GBMModelParam) , 1 , fi ) == 0 ){
                printf("error loading GBRT model\n"); exit( -1 );
            }
            boosters.resize( param.num_boosters );
            for( size_t i = 0; i < boosters.size(); i ++ ){
                boosters[ i ] = apex_booster::create_booster( param.booster_type );
                boosters[ i ]->load_model( fi );
            }
            {// load weight type
                weight_type.resize( param.num_boosters );
                if( param.num_boosters != 0 ){
                    apex_utils::assert_true( fread( &weight_type[0], sizeof(int), param.num_boosters, fi ) > 0, "load weight type" );
                }
            }
        } 
        inline void save_to_file( FILE *fo ) const{
            apex_utils::assert_true( param.num_boosters == (int)boosters.size(), "bug: GBM model inconsistent");
            fwrite( &param, sizeof(GBMModelParam) , 1 , fo );
            for( size_t i = 0; i < boosters.size(); i ++ ){
                boosters[ i ]->save_model( fo ); 
            }
            if( weight_type.size() != 0 ){
                fwrite( &weight_type[0], sizeof(int), weight_type.size(), fo );
            }
        }
        inline void push_back( apex_booster::IBooster *tptr, int wt_type = 0 ){
            boosters.push_back( tptr ); weight_type.push_back( wt_type ); param.num_boosters ++;
        }
        inline void check_init( void ) const{
            apex_utils::assert_true( param.num_boosters == (int)boosters.size(), "bug: GBM model inconsistent");
            apex_utils::assert_true( param.num_boosters == 0, "bug: GBM model inconsistent");
        }
    };
};

namespace apex_booster{
    // result buffer that caches per-instance results produced by the GBM,
    // so later rounds can resume from the cached value instead of recomputing
    template<typename SType>
    class GBMResultBuffer{
    private:
        // 1-based cursor into store; 0 means "before the first slot"
        size_t cursor;
        // true until finish_round has been called once
        bool in_first_pass;
        // one cached value per instance seen during the first round
        std::vector<SType> store;
    public:
        GBMResultBuffer( void ){
            cursor = 0;
            in_first_pass = true;
        }
        // rewind the cursor and mark the sizing pass as finished
        inline void finish_round( void ){
            cursor = 0;
            in_first_pass = false;
        }
        // advance the cursor; storage may only grow during the first round
        inline void next( void ){
            ++ cursor;
            if( cursor > store.size() ){
                apex_utils::assert_true( this->is_first_round(), "can't change buffer size after first round" );
                store.push_back( 0.0 );
            }
        }
        inline bool is_first_round( void ) const{
            return in_first_pass;
        }
        // reference to the slot under the cursor; next() must precede this
        inline SType &curr( void ){
            apex_utils::assert_true( cursor != 0, "need to call next" );
            return store[ cursor - 1 ];
        }
    };

    // GBM implementation: adapts the booster interface to the ISVDTrainer
    // training loop; subclasses supply the loss via update_stats
    class GBMTrainer: public apex_svd::ISVDTrainer{
    protected:
        // model being trained: parameters plus the boosters trained so far
        GBMModel model;
        // extra training parameters (result buffering, reboost switch)
        GBMTrainParam param;
    private:
        // saves all the configures; replayed into each booster in get_booster
        apex_utils::ConfigSaver cfg;        
    private:
        // training data result buffer: caches the summed prediction of the
        // already-trained boosters so pred only evaluates the newest one
        GBMResultBuffer<double> res_buf_train;
    protected:
        // tmp data that needed to be written by update stats
        std::vector<float> tmp_pred, tmp_grad, tmp_hess;
    private:        
        // data for booster: per-row gradient/hessian statistics and the sparse
        // feature matrix accumulated over a round, consumed in finish_round
        std::vector<float> dgrad;
        std::vector<float> dhess;
        FMatrixS   dsmat;        
        std::vector<unsigned> dgroup_id;
    protected:
        // protected: only concrete loss-specific subclasses may construct
        GBMTrainer( const apex_svd::SVDTypeParam &mtype ){
            model.mtype = mtype;
        }
        virtual ~GBMTrainer( void ){
            model.free_space();
        }
    public:
        // model related interface
        virtual void set_param( const char *name, const char *val ){
            // model structure parameters freeze once the first booster exists
            if( model.param.num_boosters == 0 ) model.param.set_param( name, val );
            param.set_param( name, val );
            // remember old parameters so they can be replayed into new boosters
            cfg.push_back( name, val );
        }
        // load model from file
        virtual void load_model( FILE *fi ) {
            model.load_from_file( fi );
        }
        // save model to file
        virtual void save_model( FILE *fo ) {
            model.save_to_file( fo );
        }
        // initialize model by defined setting
        virtual void init_model( void ){
            model.check_init();  
        }        
        // initialize trainer before training 
        virtual void init_trainer( void ){
        }
    private:        
        // generate prediction for a single instance: baseline score plus the
        // sum of all booster outputs (raw score, no activation applied)
        inline float pred( const apex_svd::SVDFeatureCSR::Elem &e ){
            //--- get group id, specified by item feature
            unsigned gid = 0;            
            if( model.param.num_item != 0 ){
                apex_utils::assert_true( e.num_ifactor == 1, "need exact 1 item id to specify item" );
                gid = e.index_ifactor[0];
            }
            // generate sum, if booster_baseline == 1, use first global feature, else use base_score
            double sum = model.param.baseline_mode == 1 ? e.value_global[0] : model.param.base_score;
            // generate sparse feature vector for prediction
            FMatrixS::Line sp;
            sp.findex = e.index_ufactor; 
            sp.fvalue = e.value_ufactor; 
            sp.len = e.num_ufactor;
            
            // if use result buffer, get result: restart the summation from the
            // cached prefix so only the most recent booster is evaluated
            size_t istart = 0;            
            if( param.use_res_buf != 0 && param.do_reboost == 0 ){
                res_buf_train.next();
                if( !res_buf_train.is_first_round() ){
                    istart = model.boosters.size() - 1;
                    sum = res_buf_train.curr();
                }
            }                
            for( size_t i = istart; i < model.boosters.size(); i ++ ){
                sum += model.boosters[i]->predict( sp, gid );
            }
            // if use result buffer, record result back
            if( param.use_res_buf != 0 && param.do_reboost == 0 ){
                res_buf_train.curr() = sum;
            }
            return static_cast<float>( sum );
        }

        // create a new booster, or reuse the single existing one in reboost mode
        inline apex_booster::IBooster *get_booster( void ){
            apex_booster::IBooster *bst;
            if( param.do_reboost == 0 || model.boosters.size() == 0 ){
                bst = apex_booster::create_booster( model.param.booster_type );
            }else{
                bst = model.boosters.back();
            }
            // replay every recorded configuration entry into the booster
            // (this is how e.g. learning_rate reaches the booster)
            cfg.before_first();
            while( cfg.next() ){
                bst->set_param( cfg.name(), cfg.val() );
            }

            if( param.do_reboost == 0 || model.boosters.size() == 0 ){
                char s[ 256 ];
                // each item is a root of forest; use at least one group
                sprintf( s, "%d", model.param.num_item == 0 ? 1 : model.param.num_item );
                bst->set_param( "rt_num_group", s );
                bst->init_trainer();
            }
            return bst;
        }
    protected:
        // add instance to sparse matrix together with its gradient statistics;
        // gid < 0 means no group information is recorded for this row
        inline void add_instance( const apex_svd::SVDFeatureCSR::Elem &e, int gid, float grad, float hess ){
            // add sparse matrix
            FMatrixS::Line sp;
            sp.findex = e.index_ufactor;
            sp.fvalue = e.value_ufactor;
            sp.len    = e.num_ufactor;                       
            if( gid >= 0 ){
                dgroup_id.push_back( gid );
            }
            // the row index returned by add_row must stay in sync with dgrad
            apex_utils::assert_true( dsmat.add_row( sp ) == dgrad.size(), "BUG" );
            // record gradient statistics (callers pass negated gradients)
            dgrad.push_back( grad );
            dhess.push_back( hess );
        }

        // add relavant gradient to the training batch; rows with a near-zero
        // hessian carry no information for the booster and are skipped
        inline void add_batch( const apex_svd::SVDPlusBlock &data ){
            for( int i = 0; i < data.data.num_row; i ++ ){
                apex_svd::SVDFeatureCSR::Elem e = data.data[i];
                if( tmp_hess[i] > 1e-6f ) {
                    const float grad = tmp_grad[i];
                    const float hess = tmp_hess[i]; 
                    
                    if( model.param.num_item != 0 ){
                        apex_utils::assert_true( e.num_ifactor == 1, "need exact 1 item id to specify item" );
                        const unsigned gid = e.index_ifactor[0];
                        this->add_instance( e, gid, grad, hess );
                    }else{
                        this->add_instance( e, -1, grad, hess );
                    }
                }
            }
        }
    protected:
        /*!
         * \brief update gradients and second gradients into tmp_grad, tmp_hess
         * \param tmp_pred prediction for each instance in data
         * \param data data block
         */
        virtual void update_stats( const std::vector<float> &tmp_pred, const apex_svd::SVDPlusBlock &data ) = 0;
    private:
        // batch prediction: raw score for every row of the block
        virtual void pred( std::vector<float> &p, const apex_svd::SVDPlusBlock &data ){
            p.resize( data.data.num_row );            
            for( int i = 0; i < data.data.num_row; i ++ ){
                p[ i ] = this->pred( data.data[i] );
            }
        }
    public:
        // one training step over a block: predict, let the subclass fill in
        // gradient statistics, then queue the instances for boosting
        virtual void update( const apex_svd::SVDPlusBlock &data ){
            this->pred( tmp_pred, data );            
            {// tmp space setting: size and zero the gradient accumulators
                tmp_grad.resize( data.data.num_row );
                tmp_hess.resize( data.data.num_row );
                std::fill( tmp_grad.begin(), tmp_grad.end(), 0.0f );
                std::fill( tmp_hess.begin(), tmp_hess.end(), 0.0f );
            }
            this->update_stats( tmp_pred, data );
            this->add_batch( data );
        }
        // prediction with the activation function applied to the raw score
        virtual void predict( std::vector<float> &p, const apex_svd::SVDPlusBlock &data ){ 
            this->pred( p, data );
            for( int i = 0; i < data.data.num_row; i ++ ){
                p[ i ] = apex_svd::active_type::map_active( p[ i ] , model.mtype.active_type ); 
            }
        }
        virtual void set_round( int nround ){
            // clear all the data accumulated during the previous round
            dgrad.resize( 0 ); 
            dhess.resize( 0 );
            dsmat.clear();    
            dgroup_id.resize( 0 );
        }
        // finish a round: train one booster on the accumulated statistics
        virtual void finish_round( void ){
            // try to train a new tree
            apex_booster::IBooster *bst = this->get_booster();
            // train the booster
            bst->do_boost( dgrad, dhess, dsmat, dgroup_id );            
            // support for do reboost: in reboost mode the single booster is
            // retrained in place instead of appending a new one
            if( param.do_reboost == 0 || model.boosters.size() == 0 ){
                model.push_back( bst );
            }else{
                apex_utils::assert_true( model.boosters.size() == 1, "reboost mode only support 1 booster" );
            }
            // res buf train: mark round done so pred can reuse cached sums
            res_buf_train.finish_round();
        }
    };
};

namespace apex_booster{
    // trainer that performs regression or classification training
    class RegGBMTrainer: public GBMTrainer{
    protected:
        // fill tmp_grad / tmp_hess with the pointwise negative first and
        // second order gradients of the active loss for each row
        virtual void update_stats( const std::vector<float> &tmp_pred, const apex_svd::SVDPlusBlock &data ){
            using namespace apex_svd;
            const int nrow = data.data.num_row;
            for( int r = 0; r < nrow; r ++ ){
                const float y    = data.data[r].label;
                const float yhat = active_type::map_active( tmp_pred[ r ], model.mtype.active_type );
                tmp_grad[ r ] = - active_type::cal_grad ( y, yhat, model.mtype.active_type );
                tmp_hess[ r ] = - active_type::cal_sgrad( y, yhat, model.mtype.active_type );
            }
        }
    public:
        RegGBMTrainer( const apex_svd::SVDTypeParam &mtype )
            :GBMTrainer( mtype ){
        }
    };     
};

namespace apex_booster{
    // LambdaRank Trainer: GBM
    class LambdaGBMTrainer: public GBMTrainer{
    private:
        // whether use pointwise sample mode: 0 = pairwise update on the score
        // difference, nonzero = independent pointwise updates per endpoint
        int sample_pointwise;
    protected:
        /*! \brief entry for information used to generate lambda rank sample */
        struct Entry{
            /*! \brief predicted score */
            float score;
            /*! \brief label of current entry */            
            float label;
            /*! \brief index of current entry in the dataset */
            unsigned data_index;
            /*! \brief compare score by default; sorts descending so higher scores rank first */            
            inline bool operator<( const Entry &p )const{
                return score > p.score;
            }
        };
        /*! \brief sample for lambda rank */
        struct LambdaSample{
            /*! \brief weight for this sample, by default set it to 1.0 */
            float weight;
            /*! \brief data index of positive sample */
            unsigned pos_index;
            /*! \brief data index of negative sample */
            unsigned neg_index;
            /*! \brief constructor */
            LambdaSample( void ){}
            LambdaSample( unsigned pos_index, unsigned neg_index, float weight = 1.0f ){
                this->pos_index = pos_index;
                this->neg_index = neg_index;
                this->weight = weight;
            }
        };
    private:
        // update step, one pair: accumulate gradient statistics into
        // tmp_grad / tmp_hess for both endpoints of the sample
        inline void update_grad( const apex_svd::SVDPlusBlock &data, const LambdaSample &sample ){
            using namespace apex_svd;
            // skip samples with too small weight
            if( sample.weight < 1e-5f ) return;
            // pair-wise mode: gradient of the loss on the score difference
            // pos - neg, pushed in opposite directions on the two endpoints
            // (note: previous comment mislabeled this branch as point-wise)
            if( sample_pointwise == 0 ) {
                float pred   =  active_type::map_active( tmp_pred[ sample.pos_index ] - tmp_pred[ sample.neg_index ], model.mtype.active_type );
                float grad   = -active_type::cal_grad ( 1.0f, pred, model.mtype.active_type ) * sample.weight;
                float hess   = -active_type::cal_sgrad( 1.0f, pred, model.mtype.active_type ) * sample.weight;
                tmp_grad[ sample.pos_index ] += grad;
                tmp_grad[ sample.neg_index ] -= grad;
                tmp_hess[ sample.pos_index ] += hess;
                tmp_hess[ sample.neg_index ] += hess;
            }else{
                // point-wise mode: push the positive toward label 1 and the
                // negative toward label 0, each with its own gradient
                {
                    float pp = active_type::map_active( tmp_pred[ sample.pos_index ], model.mtype.active_type );
                    tmp_grad[ sample.pos_index ] -= active_type::cal_grad ( 1.0f, pp, model.mtype.active_type ) * sample.weight;
                    tmp_hess[ sample.pos_index ] -= active_type::cal_sgrad( 1.0f, pp, model.mtype.active_type ) * sample.weight;
                }
                {
                    float np = active_type::map_active( tmp_pred[ sample.neg_index ], model.mtype.active_type );
                    tmp_grad[ sample.neg_index ] -= active_type::cal_grad ( 0.0f, np, model.mtype.active_type ) * sample.weight;
                    tmp_hess[ sample.neg_index ] -= active_type::cal_sgrad( 0.0f, np, model.mtype.active_type ) * sample.weight;
                }
            }
        }
    protected:
        /*!
         * \brief generate samples given the information in the data
         * \param samples output samples for lambda rank
         * \param data input information for sample generation, sorted by score
         */
        virtual void gen_sample( std::vector<LambdaSample> &samples, std::vector<Entry> &data ) = 0;
    protected:
        virtual void update_stats( const std::vector<float> &tmp_pred, const apex_svd::SVDPlusBlock &data ){
            std::vector<LambdaSample> samples;
            {// get prediction for each data, sort by score, then ask the
             // subclass to generate pair samples from the ranked list
                Entry e;
                std::vector<Entry> info;
                for( int i = 0; i < data.data.num_row; i ++ ){
                    e.score = tmp_pred[i];
                    e.label = data.data[i].label;
                    e.data_index = static_cast<unsigned>( i );
                    info.push_back( e );
                }
                std::sort( info.begin(), info.end() );
                this->gen_sample( samples, info );
            }
            {// update samples
                for( size_t i = 0; i < samples.size(); i ++ ){
                    this->update_grad( data, samples[i] );
                }
            }
        }
    public:
        LambdaGBMTrainer( const apex_svd::SVDTypeParam &mtype )
            :GBMTrainer( mtype ){
            this->sample_pointwise = 0;
        }
        virtual void set_param( const char *name, const char *val ){
            if( !strcmp( name, "rank_sample_pointwise") ) sample_pointwise = atoi( val );             
            GBMTrainer::set_param( name, val );
        }
    };
};

namespace apex_booster{
    // MAP: GBM — lambda-rank trainer whose pair weights approximate delta-AP
    class APLambdaGBMTrainer : public LambdaGBMTrainer{
    private:
        // --- pair sampling strategy parameters ---
        // positions >= ap_maxn are excluded from the delta-AP computation
        int ap_maxn;
        // pairs per query group: >0 exact count, -1 = #negatives, -2 = #positives
        int sample_num;
        // rejection-method switch (read from config; not used in this implementation)
        int reject_method;
        // round from which AP weighting becomes active
        int ap_start_round;
        // current training round, updated by set_round
        int nround;
        // blend factor: weight = ap_alpha * delta_ap + (1 - ap_alpha)
        float ap_alpha;
        // probability of keeping a query group this round (bagging)
        float keep_prob;
    public:
        APLambdaGBMTrainer( const apex_svd::SVDTypeParam &mtype ):
            LambdaGBMTrainer( mtype ){
            sample_num = -1;
            ap_alpha = 0.0f;
            ap_maxn  = INT_MAX;
            reject_method = 0;
            ap_start_round = 0;
            keep_prob = 1.0f;
            // fix: nround was previously left uninitialized, so the test
            // nround >= ap_start_round in gen_sweight read indeterminate
            // memory if sampling ran before the first set_round call
            nround = 0;
        }
        virtual ~APLambdaGBMTrainer(){}
    private:
        /*!
         * \brief generate weighted lambda-rank pair samples for one query group
         * \param samples output pair samples
         * \param data entries of the group, sorted by score (descending)
         */
        inline void gen_sweight( std::vector<LambdaSample> &samples, std::vector<Entry> &data ){
            // random drop off samples for bagging
            if( keep_prob < 1.0f - 1e-6f ){
                if( apex_random::sample_binary( keep_prob ) == 0 ) return;
            }
            // simple implementation using uniform sampling:
            // partition ranked positions into positives and negatives
            std::vector<int> pos, neg, pos_top;
            for( size_t i = 0; i < data.size(); i ++ ){
                if( data[i].label > 0.5f ){
                    pos.push_back( (int)i );
                    if( (int)i < ap_maxn ){
                        pos_top.push_back( (int)i );
                    }
                }else{
                    neg.push_back( (int)i );
                }
            }
            // start generate pairs; need at least one of each class
            if( pos.size() > 0 && neg.size() > 0 ){
                apex_random::shuffle( pos );
                apex_random::shuffle( neg );
                size_t snum = 0;
                if( sample_num > 0 ) snum = (size_t)sample_num;
                if( sample_num == -1 ) snum = neg.size();
                if( sample_num == -2 ) snum = pos.size();
                
                size_t nsample = 0;
                for( size_t i = 0; nsample < snum; i ++ ){
                    float delta_ap = 0.0f, wt;
                    if( nround >= ap_start_round ){
                        int pos_idx = pos[ i % pos.size() ];
                        int neg_idx = neg[ i % neg.size() ];
                        // delta-AP assumes the positive sits below the
                        // negative; swap local copies to enforce that
                        if( pos_idx < neg_idx ) std::swap( pos_idx, neg_idx );
                        if( neg_idx < ap_maxn ){
                            // try to calculate delta ap caused by swapping the pair
                            int pos_cnt = 0;
                            for( size_t j = 0; j < pos_top.size(); j ++ ){
                                if( pos_top[j] >= pos_idx ){
                                    delta_ap -= ( j + 1.0f ) / ( pos_idx + 1.0f ); break;
                                }
                                if( pos_top[j] > neg_idx ){
                                    delta_ap += 1.0f /( pos_top[j] + 1.0f ); 
                                }else{
                                    if( pos_top[j] != neg_idx ) pos_cnt ++;
                                }
                            }
                            delta_ap += ( pos_cnt + 1.0f ) / ( neg_idx + 1.0f );
                            delta_ap /= pos.size();
                        }
                        // sanity: delta_ap stays bounded and is never NaN
                        apex_utils::assert_true( delta_ap < 1.0f + 1e-6f, "BUGA" ); 
                        apex_utils::assert_true( delta_ap ==  delta_ap, "BUGB" ); 
                        wt = ap_alpha * delta_ap + 1.0f - ap_alpha;
                    }else{
                        wt = 1.0f;                        
                    }
                    {
                        // record the pair using the original (unswapped) indices
                        const int pos_idx = pos[ i % pos.size() ];
                        const int neg_idx = neg[ i % neg.size() ];
                        samples.push_back( LambdaSample( data[ pos_idx ].data_index, data[ neg_idx ].data_index, wt ) ); 
                        nsample ++;
                    }
                }
            }
        }
    protected:
        // implement me: this is an example implementation
        virtual void gen_sample( std::vector<LambdaSample> &samples, std::vector<Entry> &data ){
            gen_sweight( samples, data );
        }        
    public:
        // set param here; unmatched names fall through to the base trainer
        virtual void set_param( const char *name, const char *val ){
            if( !strcmp( name, "rank_sample_num" ) ) sample_num = atoi( val );
            if( !strcmp( name, "lambda_ap_maxn" ) )    ap_maxn = atoi( val );
            if( !strcmp( name, "lambda_ap_alpha"  ) )  ap_alpha  = (float)atof( val );
            if( !strcmp( name, "lambda_ap_reject" ) )  reject_method = atoi( val );
            if( !strcmp( name, "lambda_ap_rstart" ) )  ap_start_round = atoi( val );
            if( !strcmp( name, "lambda_keep_prob" ) )  keep_prob = (float)atof( val );
            if( !strcmp( name, "subsample_prob" ) )    keep_prob = (float)atof( val );
            LambdaGBMTrainer::set_param( name, val );
        }
        virtual void set_round( int nround ){
            LambdaGBMTrainer::set_round( nround );
            this->nround = nround;
        }
    };
};

#endif
