/*
 *  Copyright 2012-2013 APEXLab, Shanghai Jiao Tong University and Huawei Noah's Ark Lab
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */
/*!
 * \file gfmf_ucut.h
 * \brief This file implements the user time-dependent general functional matrix factorization,
 *        we specially implement this algorithm to optimize the memory and time cost of the algorithm.
 * 
 * Acknowledgment: This work is a joint work of Shanghai Jiao Tong University and Huawei Noah's Ark Lab
 *
 * \author Tianqi Chen: tqchen@apex.sjtu.edu.cn
 */
// UCut stands for user time cutting
#ifndef _GFMF_UCUT_H_
#define _GFMF_UCUT_H_
// Note: most part of the code in this file is maintaining data structures instead of learning
// The learning related functions are
//    1. make_grad_stats : get gradient boosting statistics for each of instance
//    2. merger.do_merge(implemented in gfmf_singlevar_cut.h): find the cutting solution based on statistics
//    3. merge_add_update: update the statistics after one function is added

#include "../../apex_svd.h"
#include "../base-solver/apex_svd_base.h"
#include "../../apex_svd_model.h"
#include "../../apex-utils/apex_config.h"
#include "gfmf_singlevar_cut.h"

#include <cstring>
#include <climits>
#include <vector>

// name space for general functional matrix factorization
namespace gfmf{
    using namespace apex_tensor;
    using namespace apex_svd;
    // UCut model for each of the user: stores per-user piecewise-constant
    // time functions as linked lists of slices over shared weight arrays
    class UCutModel{        
    public:
        // model size parameter, written verbatim to the model file
        struct Param{
            // number of root groups (one linked-list head per user)
            unsigned num_roots;
            // number of allocated hdata entries (roots plus chained nodes)
            uint64_t num_hdata;
            // total number of pieces stored in begin_value and weight
            uint64_t num_piece;
            // reserved part for forward compatibility
            int reserved[ 32 ];
            Param( void ){
                num_roots = 0;
                num_hdata = 0;
                num_piece = 0;
                memset( reserved, 0, sizeof( reserved ) );
            }           
        };        
        // information entry of each header: one piecewise-constant function,
        // stored as the contiguous slice [cut_begin, cut_begin + num_cut)
        struct InfoEntry{
            // index of the next entry in the linked list, -1 marks the end
            int next;
            // begin position of the slice, 64 bit unsigned for safety on large models
            uint64_t cut_begin;
            // number of cuts in data; 0 marks an empty or deleted entry
            unsigned num_cut;
        };
        // reference wrapper so entries can be sorted by slice position
        struct ReferInfoEntry{
            InfoEntry *ptr;
            ReferInfoEntry(){}
            ReferInfoEntry( InfoEntry *p ):ptr(p){}
            // order by start offset of the slice inside begin_value/weight
            inline static bool cmp_addr( const ReferInfoEntry &a, const ReferInfoEntry &b ){
                return (a.ptr)->cut_begin < (b.ptr)->cut_begin;
            }
        };
    public:
        // parameter
        Param param;
        // entry storage: first num_roots elements are list heads, the rest chained nodes
        std::vector<InfoEntry> hdata;
        // begin value of each interval; the first piece's begin value is ignored
        // (its storage is reused to hold the kfactor id, see kfactor())
        std::vector<float> begin_value;
        // weight of each interval
        std::vector<float> weight;
        //--------------------------
    private:
        // free list of deleted entry indices available for reuse
        std::vector<int> deleted_hdata;
    public:
        // query the piece index covering fvalue within entry e
        inline size_t query_id( const InfoEntry &e, float fvalue ) const{
            size_t start = e.cut_begin;
            size_t end   = start + e.num_cut;
            
            apex_utils::assert_true( start < end, "start < end" );
            if( start+1 == end ) return start;
            // binary search for the slot, skipping the ignored first begin value
            size_t idx = std::lower_bound( begin_value.begin() + start + 1, 
                                           begin_value.begin() + end, fvalue ) - begin_value.begin();
            return idx - 1;
        }
        // evaluate the piecewise function of e at fvalue
        inline float query_value( const InfoEntry &e, float fvalue ) const{
            size_t idx = query_id( e, fvalue );
            return weight[ idx ];
        }
        // kfactor id of an entry, type-punned into the ignored first begin value slot
        inline int &kfactor( const InfoEntry &e ){
            return *((int*)( &begin_value[ e.cut_begin ] ));
        }
        // append a new cut (piecewise function) to the model and return its entry;
        // e.next is left unset, push_hdata links it into a list
        inline InfoEntry add_cut( int kfactor,
                                  const std::vector<float> &begin_value, 
                                  const std::vector<float> &weight ){
            InfoEntry e;
            e.cut_begin = param.num_piece;
            e.num_cut = static_cast<unsigned>( begin_value.size() );
            param.num_piece += begin_value.size();

            for( size_t i = 0; i < begin_value.size(); i ++ ){
                (this->begin_value).push_back( begin_value[i] );
                (this->weight).push_back( weight[i] );
            }
            // stamp the kfactor id into the first (ignored) begin value slot
            this->kfactor( e ) = kfactor;
            return e;
        }
        
        // push hdata entry to the front of root rid's linked list
        inline void push_hdata( int rid, InfoEntry e ){
            apex_utils::assert_true( e.num_cut != 0, "can not push empty hdata" );            
            if( hdata[ rid ].num_cut == 0 ){
                // root slot is empty: store the entry in place
                e.next = -1; hdata[ rid ] = e; 
            }else{               
                int hid;
                if( deleted_hdata.size() != 0 ){
                    // reuse a previously deleted slot
                    hid = deleted_hdata.back();
                    deleted_hdata.pop_back();   
                }else{                    
                    // allocate a fresh slot; BUGFIX: check the limit before the
                    // narrowing use of num_hdata instead of after
                    apex_utils::assert_true( param.num_hdata < INT_MAX, "hdata size exceed limit");
                    hid = static_cast<int>( param.num_hdata ++ );
                    hdata.resize( hdata.size() + 1 );
                }
                // move the old head into hid and link e in at the front
                hdata[ hid ] = hdata[ rid ];
                e.next = hid; hdata[ rid ] = e;
            }
        }
        // mark an entry as deleted; non-root slots become reusable
        inline void mark_delete_hdata( int pid ){
            // unlink: an entry with num_cut == 0 is treated as empty
            hdata[ pid ].num_cut = 0;
            hdata[ pid ].next = -1; 
            // root slots stay in place, only chained slots go to the free list
            if( (unsigned)pid >= param.num_roots ) deleted_hdata.push_back( pid );
        }
        // compact begin_value/weight by sliding live slices to the front
        inline void defragment( int nround ){
            // collect live entries; vec holds raw pointers into hdata, so
            // hdata must not be resized until this function finishes
            std::vector<ReferInfoEntry> vec;
            for( size_t i = 0; i < param.num_roots; i ++ ){
                for( int pid = (int)i; pid != -1; pid = hdata[ pid ].next ){
                    if( hdata[ pid ].num_cut == 0 ) continue;
                    vec.push_back( ReferInfoEntry( &hdata[ pid ] ) );
                }
            }
            // process slices in address order so memmove never overwrites live data
            std::sort( vec.begin(), vec.end(), ReferInfoEntry::cmp_addr );
            uint64_t saddr = 0;
            for( size_t i = 0; i < vec.size(); i ++ ){
                InfoEntry &e = *(vec[i].ptr);
                apex_utils::assert_true( e.num_cut > 0,"bug");
                if( saddr < e.cut_begin ){ 
                    memmove( &begin_value[ saddr ], &begin_value[ e.cut_begin ], sizeof(float)*e.num_cut );
                    memmove( &weight[ saddr ], &weight[ e.cut_begin ], sizeof(float)*e.num_cut );
                    e.cut_begin = saddr; 
                }else{
                    apex_utils::assert_true( saddr == e.cut_begin, "error in defragment process" );
                }
                saddr += e.num_cut;
            }
            // shrink storage to the compacted size
            param.num_piece = saddr;
            begin_value.resize( param.num_piece );
            weight.resize( param.num_piece );
                        
            if( check_bug ){
                fprintf( stderr, "\nnum_hdata=%d, avg_hdata=%lf, avg_piece=%lf, piece/hdata=%lf\n",
                     (int)param.num_hdata,
                     (double)(param.num_hdata- deleted_hdata.size()) / param.num_roots, 
                     (double) param.num_piece / param.num_roots,
                     (double) param.num_piece / (param.num_hdata- deleted_hdata.size()) );
            }
        }
        // initialize an empty model: one empty root entry per group
        inline void init_model( void ){
            param.num_piece = 0;
            param.num_hdata = param.num_roots;
            hdata.resize( param.num_hdata );
            begin_value.resize( param.num_piece );
            weight.resize( param.num_piece );
            
            for( unsigned i = 0; i < param.num_roots; i ++ ){
                hdata[i].num_cut = 0; hdata[i].next = -1;
            }
        }
        // load model from file, rebuilding the free list of deleted entries
        inline void load_model( FILE *fi ){
            apex_utils::assert_true( fread( &param, sizeof(Param), 1, fi ) > 0, "load UCutModel Param" );
            hdata.resize( param.num_hdata );            
            begin_value.resize( param.num_piece );
            weight.resize( param.num_piece );
            if( param.num_hdata != 0 ){
                apex_utils::assert_true( fread( &hdata[0], sizeof(InfoEntry), hdata.size(), fi ) > 0, "load UCutModel Node" );
            }
            if( param.num_piece != 0 ) {
                apex_utils::assert_true( fread( &begin_value[0], sizeof(float), begin_value.size(), fi ) > 0, "load UCutModel Node" );
                apex_utils::assert_true( fread( &weight[0], sizeof(float), weight.size(), fi ) > 0, "load UCutModel Node" );
            }
            // rebuild free list: deleted non-root entries have num_cut == 0
            deleted_hdata.clear();
            for( size_t i = param.num_roots; i < hdata.size(); i ++ ){
                if( hdata[i].num_cut == 0 ) deleted_hdata.push_back( (int)i );
            }            
        }
        // save model to file; layout mirrors load_model
        inline void save_model( FILE *fo ) const{
            fwrite( &param, sizeof(Param), 1, fo );
            // BUGFIX: guard empty vectors -- &vec[0] on an empty vector is
            // undefined behavior; load_model already guards the symmetric reads
            if( hdata.size() != 0 ){
                fwrite( &hdata[0], sizeof(InfoEntry), hdata.size(), fo );
            }
            if( begin_value.size() != 0 ){
                fwrite( &begin_value[0], sizeof(float), begin_value.size(), fo );
                fwrite( &weight[0], sizeof(float), weight.size(), fo );
            }
        }
    };  
};

#include "../../apex_svd.h"

namespace gfmf{
    using namespace apex_svd;
    // training parameters controlling the user-time cut (UCut) procedure
    struct MFCutTrainParam{
        // minimum of kfactor, we do ucut for latent factor between [min_kfactor, max_kfactor), kfactor = 0 means bias term
        int min_kfactor;
        // maximum of kfactor
        int max_kfactor;
        // maximum size of buffer, set it to maximum number of records a user can have, for example, set it to number of items will be a safe choice
        int max_buf_size;
        // start round of ucut process
        int start_ucut;
        // end round of ucut
        int end_ucut;
        // weight decay of u cut after each round
        float wd_ucut;
        // number of user group
        int num_ugroup;
        // whether to use memory more frugally
        int frugal_mode;
        // normalize mean value
        int norm_mean;
        // bucket size to do prebucket of input timestamp, simply set to 0
        int bucket_size;
        // minimum tolerance of cut that can go into UCUT model, it is set to 1, 
        int min_cut_tol;
        // constructor: give every field a deterministic default
        MFCutTrainParam( void ){
            max_buf_size = 100000;
            min_kfactor = 0;
            max_kfactor = INT_MAX;
            start_ucut = 0;
            end_ucut = INT_MAX;
            wd_ucut    = 0.0f;
            min_cut_tol = 1;
            bucket_size = 0;
            frugal_mode = 0;
            norm_mean = 0;
            // BUGFIX: num_ugroup was left uninitialized before; it was read as
            // garbage unless the configuration explicitly set it
            num_ugroup = 0;
        }
        // set a parameter by name/value pair from the configuration
        inline void set_param( const char *name, const char *val ){
            if( !strcmp( name, "max_buf_size" ) ) max_buf_size = atoi( val );
            if( !strcmp( name, "min_kfactor" ) )  min_kfactor = atoi( val );
            if( !strcmp( name, "max_kfactor" ) )  max_kfactor = atoi( val );
            if( !strcmp( name, "start_ucut" ) )   start_ucut = atoi( val );
            if( !strcmp( name, "end_ucut" ) )     end_ucut = atoi( val );
            if( !strcmp( name, "wd_ucut" ) )      wd_ucut = (float)atof( val );
            if( !strcmp( name, "min_cut_tol" ) )    min_cut_tol = atoi( val );
            if( !strcmp( name, "bucket_size" ) )    bucket_size = atoi( val );
            if( !strcmp( name, "num_ugroup" ) )     num_ugroup = atoi( val );
            if( !strcmp( name, "frugal_mode" ) )    frugal_mode = atoi( val );
            if( !strcmp( name, "norm_mean" ) )      norm_mean = atoi( val );
        }
        // whether round nround falls inside the ucut update window [start_ucut, end_ucut)
        inline bool update_ucut( int nround ) const{            
            if( nround < start_ucut || nround >= end_ucut ) return false;
            return true;
        }
        // normal (non-cut) updates run every round
        inline bool update_normal( int nround ) const{
            return true;
        }
    };
    
    // holds separate single-variable training parameters for the bias term
    // and the latent factor terms, with prefix-based configuration dispatch
    struct MFUCutParamSwitchPack{
        // parameters used when cutting the bias (kfactor == 0)
        SingleVarRTParamTrain param_bias;
        // parameters used when cutting latent factors (kfactor > 0)
        SingleVarRTParamTrain param_factor;
        // dispatch a setting by prefix: "rt:" goes to both parameter sets,
        // "rtb:" only to the bias set, "rtf:" only to the factor set
        inline void set_param( const char *name, const char *val ){
            if( !strncmp( name, "rt:", 3 ) ){
                param_bias.set_param( name + 3, val );
                param_factor.set_param( name + 3, val );
            }else if( !strncmp( name, "rtb:", 4 ) ){
                param_bias.set_param( name + 4, val );
            }else if( !strncmp( name, "rtf:", 4 ) ){
                param_factor.set_param( name + 4, val );
            }
        }
    };

    // MF UCut
    class SVDMFUCutTrainer: public SVDPPFeature{
    protected:
        // extra ucut model
        UCutModel ucmodel;
        // training parameter
        MFCutTrainParam ucparam;
        // round counter
        int nround;
    protected:
        // one (timestamp, row-index) record used to order instances in time
        struct TEntry{
            // timestamp of the record
            float tstamp;
            // row index of the record inside the current data block
            int rindex;
            TEntry( void ){}
            TEntry( float ts, int rid ): tstamp( ts ), rindex( rid ){}
            // entries compare by timestamp, so sorting yields time order
            inline bool operator<( const TEntry &rhs ) const{
                return tstamp < rhs.tstamp;
            }
        };  
        // current row_index
        int curr_rindex;
        // data for timestamp info
        std::vector<TEntry> tdata;
    protected:
        // buffer of user latent factors generated by additional part
        CTensor2D buf_ufactor;
        // buffer of item latent factors in each instance
        CTensor2D buf_ifactor;
        // buffer of user biases generated by additional part
        CTensor1D buf_ubias;
        // tmp storage for prediction
        std::vector<float> tmp_pred, tmp_grad, tmp_sgrad;
    private:
        // normalize a cut entry to zero mean over the current records and
        // fold the removed mean into the static user bias / latent factor
        inline void merge_mean_normalize( const UCutModel::InfoEntry &e, unsigned uid ){
            if( e.num_cut == 0 ) return;
            const float *cut_begin = &ucmodel.begin_value[ e.cut_begin ];
            float *cut_weight = &ucmodel.weight[ e.cut_begin ];
            // accumulate the total weight the function contributes over all records
            double total = 0.0;
            size_t pos = 0;
            for( unsigned cut = 1; cut < e.num_cut; ++ cut ){
                while( pos < tdata.size() && tdata[ pos ].tstamp < cut_begin[ cut ] ){
                    total += cut_weight[ cut - 1 ];
                    ++ pos;
                }
            }
            // remaining records fall into the last interval
            while( pos < tdata.size() ){
                total += cut_weight[ e.num_cut - 1 ];
                ++ pos;
            }
            float wmean = static_cast<float>( total / tdata.size() );
            // shift every piece so the time-dependent part has zero mean
            for( unsigned cut = 0; cut < e.num_cut; ++ cut ){
                cut_weight[ cut ] -= wmean;
            }
            // move the mean into the corresponding static model weight
            const int kfactor = ucmodel.kfactor( e );
            if( kfactor == 0 ){
                model.u_bias[ uid ] += wmean;
            }else{
                model.W_user[ uid ][ kfactor - 1 ] += wmean;
            }
        }
        // accumulate the effect of one piecewise-constant function into the
        // per-record buffers (buf_ubias for bias, buf_ufactor for factors);
        // tdata is walked in time order against the function's cut points
        inline void merge_add( const UCutModel::InfoEntry &e, int sgn = +1 ){
            if( e.num_cut == 0 ) return;
            const float *cut_begin  = &ucmodel.begin_value[ e.cut_begin ];
            const float *cut_weight = &ucmodel.weight[ e.cut_begin ];
            const int kfactor = ucmodel.kfactor( e );
            size_t pos = 0;

            if( kfactor == 0 ){
                // bias term: add the active interval weight to each record's bias buffer
                for( unsigned cut = 1; cut < e.num_cut; ++ cut ){
                    while( pos < tdata.size() && tdata[ pos ].tstamp < cut_begin[ cut ] ){
                        buf_ubias[ tdata[ pos ].rindex ] += sgn * cut_weight[ cut - 1 ];
                        ++ pos;
                    }
                }
                // remaining records fall into the last interval
                while( pos < tdata.size() ){
                    buf_ubias[ tdata[ pos ].rindex ] += sgn * cut_weight[ e.num_cut - 1 ];
                    ++ pos;
                }
            }else{
                // latent factor term: add into the corresponding factor column
                for( unsigned cut = 1; cut < e.num_cut; ++ cut ){
                    while( pos < tdata.size() && tdata[ pos ].tstamp < cut_begin[ cut ] ){
                        buf_ufactor[ tdata[ pos ].rindex ][ kfactor - 1 ] += sgn * cut_weight[ cut - 1 ];
                        ++ pos;
                    }
                }
                while( pos < tdata.size() ){
                    buf_ufactor[ tdata[ pos ].rindex ][ kfactor - 1 ] += sgn * cut_weight[ e.num_cut - 1 ];
                    ++ pos;
                }
            }
        }
        // add information to tmp space by merge sort, update tmp_pred as well
        inline void merge_add_update( const float *bvalue, 
                                      const float *weight,
                                      const int kfactor, 
                                      const unsigned num_cut, 
                                      int sgn = +1 ){
            if( num_cut == 0 ) return;
            size_t j = 0;

            if( kfactor == 0 ){
                for( unsigned i = 1; i < num_cut; i ++ ){
                    for( ;j < tdata.size() &&  tdata[j].tstamp < bvalue[i]; j ++ ){
                        buf_ubias[ tdata[j].rindex ]+= sgn * weight[ i - 1 ]; 
                        tmp_pred[ tdata[j].rindex ] += sgn * weight[ i - 1 ]; 
                    }
                }
                // final cut values
                for( ;j < tdata.size(); j ++ ){
                    buf_ubias[ tdata[j].rindex ] += sgn * weight[ num_cut - 1 ];
                    tmp_pred[ tdata[j].rindex ]  += sgn * weight[ num_cut - 1 ]; 
                }                
            }else{
                for( unsigned i = 1; i < num_cut; i ++ ){
                    for( ;j < tdata.size() &&  tdata[j].tstamp < bvalue[i]; j ++ ){
                        buf_ufactor[ tdata[j].rindex ][ kfactor - 1 ] += sgn * weight[ i - 1 ]; 
                        tmp_pred[ tdata[j].rindex ] += sgn * weight[ i - 1 ] * buf_ifactor[ tdata[j].rindex ][ kfactor - 1 ]; 
                    }
                }
                // final cut values
                for( ;j < tdata.size(); j ++ ){
                    buf_ufactor[ tdata[j].rindex ][ kfactor - 1 ] += sgn * weight[ num_cut - 1 ];
                    tmp_pred[ tdata[j].rindex ] += sgn * weight[ num_cut - 1 ] * buf_ifactor[ tdata[j].rindex ][ kfactor - 1 ]; 
                }
            }
        }
        // convenience overload: unpack an InfoEntry and forward to the pointer version
        inline void merge_add_update( const UCutModel::InfoEntry &e, int sgn = +1 ){
            if( e.num_cut == 0 ) return;
            const int kfactor = ucmodel.kfactor( e );
            this->merge_add_update( &ucmodel.begin_value[ e.cut_begin ],
                                    &ucmodel.weight[ e.cut_begin ],
                                    kfactor, e.num_cut, sgn );
        }
        // apply a single constant weight directly to the static model weight
        // and shift the cached predictions in tmp_pred accordingly
        inline void weight_add_update( unsigned uid, int kfactor, float wt, int sgn = +1 ){
            apex_utils::assert_true( ucparam.min_cut_tol != 0, "can not go here seems" );
            if( kfactor == 0 ){
                // bias: constant shift of every prediction
                model.u_bias[ uid ] += wt * sgn;
                for( size_t i = 0; i < tmp_pred.size(); ++ i ){
                    tmp_pred[ i ] += sgn * wt;
                }
            }else{
                // factor: shift scaled by each record's item factor
                model.W_user[ uid ][ kfactor - 1 ] += wt * sgn;
                for( size_t i = 0; i < tmp_pred.size(); ++ i ){
                    tmp_pred[ i ] += sgn * wt * buf_ifactor[ i ][ kfactor - 1 ];
                }
            }
        }
    protected:
        // cutting algos
        MFUCutParamSwitchPack svpack;
        SingleVarRTParamTrain svparam;
        ComboSingleVarCutMerger merger;
    public:
        // constructor: forward the model type to the base feature trainer;
        // merger is bound by reference to svparam, which is swapped between
        // bias/factor parameter sets during training
        SVDMFUCutTrainer( const SVDTypeParam &mtype ):
            SVDPPFeature( mtype ), 
            merger(svparam){
        }
        // destructor: release the per-record buffers allocated after initialization
        virtual ~SVDMFUCutTrainer( void ){
            if( this->init_end ){
                tensor::free_space( buf_ubias );
                // factor buffers exist only when latent factors are cut (max_kfactor > 1)
                if( ucparam.max_kfactor > 1 ){
                    tensor::free_space( buf_ufactor );
                    tensor::free_space( buf_ifactor );
                }
            }
        }
    protected:
        /*!
         * \brief prepare per-record gradient and second-order gradient statistics
         *        for the dimension being cut; kfactor == 0 means the bias term,
         *        otherwise statistics are chained through the item factor
         */
        inline void make_grad_stats( const SVDPlusBlock & data, int kfactor ){
            // first-order gradient per record
            tmp_grad.resize( tmp_pred.size() );
            // second-order gradient per record
            tmp_sgrad.resize( tmp_pred.size() );

            for( int i = 0; i < data.data.num_row; ++ i ){
                const float label = data.data[i].label;
                const float pred  = active_type::map_active( tmp_pred[i], model.mtype.active_type );
                if( kfactor == 0 ){
                    // bias dimension: gradient of the loss taken as-is
                    tmp_grad[ i ]  = - active_type::cal_grad ( label, pred, model.mtype.active_type );
                    tmp_sgrad[ i ] = - active_type::cal_sgrad( label, pred, model.mtype.active_type );
                }else{
                    // factor dimension: chain rule through the item factor qi
                    const float qi = buf_ifactor[ i ][ kfactor - 1 ];
                    tmp_grad[ i ]  = - qi * active_type::cal_grad ( label, pred, model.mtype.active_type );
                    tmp_sgrad[ i ] = - qi * qi * active_type::cal_sgrad( label, pred, model.mtype.active_type );
                }
            }
        }
    private:
        // regularize the corresponding weight, must be called together with reg factor cut
        // applies weight decay to both the accumulated per-record factor buffer and
        // the stored piecewise cut weights, keeping tmp_pred consistent
        inline void reg_factor_cut( unsigned uid, int kfactor, UCutModel::InfoEntry e ){
            // weight decay disabled: nothing to do
            if( ucparam.wd_ucut < 1e-6f ) return;
            float step = ucparam.wd_ucut * svparam.learning_rate; 
            for( size_t i = 0; i < tmp_pred.size(); i ++ ){                
                float pu   = buf_ufactor[ i ][ kfactor ];
                // keep the cached prediction consistent with the shrunk factor
                tmp_pred[ i ] -= buf_ifactor[ i ][kfactor] * pu * step;
                buf_ufactor[ i ][ kfactor ] *= ( 1.0f - step );
            }
            // shrink the stored piecewise weights by the same factor
            if( e.num_cut != 0 ){
                for( unsigned i = 0; i < e.num_cut; i ++ ){
                    ucmodel.weight[ e.cut_begin + i ] *= ( 1.0f - step );
                }
            }
        }

        // apply weight decay to a factor stored as a single static weight,
        // keeping the cached predictions in tmp_pred consistent
        inline void reg_factor_single( unsigned uid, int kfactor ){
            if( ucparam.wd_ucut < 1e-6f ) return;
            apex_utils::assert_true( ucparam.min_cut_tol != 0, "this can not happen, bug.." );
            const float step = ucparam.wd_ucut * svparam.learning_rate;
            const float pu = model.W_user[ uid ][ kfactor ];
            // subtract the decayed contribution from every cached prediction
            for( size_t i = 0; i < tmp_pred.size(); ++ i ){
                tmp_pred[ i ] -= buf_ifactor[ i ][ kfactor ] * (pu * step);
            }
            model.W_user[ uid ][ kfactor ] *= ( 1.0f- step );
        }

        // decide whether a freshly learned cut is worth storing as a piecewise
        // function, or should collapse into a single static weight update
        inline void try_add_cut( std::vector<UCutModel::InfoEntry> &vec,
                                 unsigned uid, int kfactor,
                                 const std::vector<float> &begin_value,
                                 const std::vector<float> &weight ){
            if( weight.size() > (size_t)ucparam.min_cut_tol ){
                // enough pieces: register the cut and fold it into the buffers
                UCutModel::InfoEntry e = ucmodel.add_cut( kfactor, begin_value, weight );
                this->merge_add_update( e, +1 );
                vec.push_back( e );
                // regularize the cut weights (factors only)
                if( kfactor != 0 ) this->reg_factor_cut( uid, kfactor-1, e );
            }else if( weight.size() == 1 && fabsf(weight[0]) > 1e-5f ){
                // a single significant piece degenerates to a constant update
                this->weight_add_update( uid, kfactor, weight[0] );
                // regularize the static weight (factors only)
                if( kfactor != 0 ) this->reg_factor_single( uid, kfactor-1 );
            }
        }
    protected:
        // merge two piecewise-constant functions (left, right) into one:
        // the merged weight on any interval is the sum of both inputs' weights there.
        // NOTE: begin_value/weight outputs must not alias the input arrays, since
        // we push_back to them while still reading the inputs
        inline void do_merge_cut( std::vector<float> &begin_value, std::vector<float> &weight,
                                  const float *bvleft , const float *wtleft , unsigned nleft,
                                  const float *bvright, const float *wtright, unsigned nright ){
            //!! again, be careful, such kind of bug often occurs,
            //!! when we push back to begin_value, and weight
            // the first begin value is ignored by convention, push a placeholder
            begin_value.push_back( 0.0f );

            // skip the (ignored) first begin value on both sides
            bvleft ++; bvright ++; nleft --; nright --;
            while( nleft != 0 && nright != 0 ){
                // weight of the current interval is the sum of both sides
                weight.push_back( *wtleft + *wtright );
                const float df = *bvleft - *bvright;
                if( df < -rt_eps ){
                    // next cut boundary comes from the left function only
                    begin_value.push_back( *bvleft );
                    bvleft ++; wtleft ++; nleft --;
                    continue;
                }
                if( df > rt_eps ){
                    // next cut boundary comes from the right function only
                    begin_value.push_back( *bvright );
                    bvright ++; wtright ++; nright --;
                    continue;
                }
                {// boundaries coincide (within rt_eps): advance both sides
                    begin_value.push_back( *bvleft );
                    bvleft ++;  wtleft ++; nleft --;
                    bvright ++; wtright ++; nright --;
                }
            }
            // right exhausted: wtright now rests on its last interval weight,
            // which applies to all remaining left intervals
            while( nleft != 0 ){
                weight.push_back( *wtleft + *wtright );
                begin_value.push_back( *bvleft );
                bvleft ++; wtleft ++; nleft --;
            }
            // left exhausted: symmetric case
            while( nright != 0 ){
                weight.push_back( *wtleft + *wtright );
                begin_value.push_back( *bvright );
                bvright ++; wtright ++; nright --;
            }
            // weight of the final (unbounded) interval
            weight.push_back( *wtleft + *wtright );
        }
    private:
        // merge freshly learned cuts (begin_value/weight) with the user's
        // existing cut entry `old` for the same kfactor; the surviving entry
        // is appended to vec for re-registration by the caller
        inline void try_merge_cut( std::vector<UCutModel::InfoEntry> &vec,
                                   unsigned uid, int kfactor,
                                   const std::vector<float> &begin_value,
                                   const std::vector<float> &weight,
                                   const UCutModel::InfoEntry &old ){
            if( weight.size() > (unsigned)ucparam.min_cut_tol ){
                // fold the new function into the buffers and cached predictions
                // (begin_value.size() == weight.size() by construction in build_cuts)
                this->merge_add_update( &begin_value[0], &weight[0], kfactor, begin_value.size(), +1 );
                
                if( weight.size() == 1 ){
                    // single new piece: shift every old piece by the constant in place
                    for( unsigned i = 0; i < old.num_cut; i ++ ){
                        ucmodel.weight[ old.cut_begin + i ] += weight[ 0 ];
                    }
                    vec.push_back( old );
                    if( kfactor != 0 ) this->reg_factor_cut( uid, kfactor-1, old );                    
                }else{                    
                    // general case: merge old and new piecewise functions into a fresh entry
                    std::vector<float> bv, wt;
                    this->do_merge_cut( bv, wt,
                                        &ucmodel.begin_value[ old.cut_begin ], &ucmodel.weight[ old.cut_begin ], old.num_cut, 
                                        &begin_value[0], &weight[0], begin_value.size() );
                    UCutModel::InfoEntry e = ucmodel.add_cut( kfactor, bv, wt );
                    vec.push_back( e );
                    // reg ucut
                    if( kfactor != 0 ) this->reg_factor_cut( uid, kfactor-1, e );
                }
            }else{
                // new function too small: apply it as a static weight if significant,
                // and keep the old entry unchanged
                if( weight.size() == 1 && fabsf(weight[0]) > 1e-5f ){
                    this->weight_add_update( uid, kfactor, weight[0] );
                }
                vec.push_back( old );
                // reg ucut
                if( kfactor != 0 ) this->reg_factor_cut( uid, kfactor-1, old );
            }
        }        
    protected:
        // learn a piecewise-constant function (cut boundaries and weights) for
        // the current dimension from the statistics in tmp_grad/tmp_sgrad
        inline void build_cuts( std::vector<float> &begin_value,
                                 std::vector<float> &weight,                                 
                                 int kfactor ){ 
            
            // initialize list data
            SCStatList dlist( svparam );
            
            // one statistics entry per record, keyed by timestamp
            for( size_t i = 0; i < tdata.size(); i ++ ){
                int rid = tdata[i].rindex;
                dlist.push_back( tdata[i].tstamp, tmp_grad[ rid ], tmp_sgrad[ rid ] );
            }
                        
            // find the cutting solution based on the statistics
            // (do_merge is implemented in gfmf_singlevar_cut.h)
            merger.do_merge( dlist );
            
            // if we only get one piece( no cutting ), 
            // treat it as a normal ufactor update: pull the single weight
            // toward mean_weight via the regularization term
            if( dlist.num_interval() == 1 && svparam.reg_method == 2 && ucparam.min_cut_tol != 0 ){
                int cid = dlist[0].next;
                // shift the begin value slightly left so the first record is covered
                begin_value.push_back( dlist[ cid ].begin_value - rt_eps );
                weight.push_back( svparam.calc_weight( dlist[ cid ].sum_grad + svparam.reg_lambda * svparam.mean_weight, 
                                                       dlist[ cid ].sum_sgrad ) );
            }else{
                // return back results: one (begin_value, weight) pair per interval
                for( int cid = dlist[0].next; cid != -1; cid = dlist[ cid ].next ){
                    begin_value.push_back( dlist[ cid ].begin_value - rt_eps );
                    weight.push_back( svparam.calc_weight( dlist[ cid ].sum_grad, dlist[ cid ].sum_sgrad ) );
                }
            }
        }
    private:
        // bootstrap ucut without deletion: first pass for a user block,
        // there are no previously stored cuts to merge with
        inline void bootstrap_ucuts( const SVDPlusBlock &data ){
            // all records in a block share the same user id
            const unsigned uid = data.data[0].index_ufactor[0];
            std::vector<UCutModel::InfoEntry> buf;

            // kfactor 0 (bias) uses the bias parameter set, factors use the factor set
            svparam = (ucparam.min_kfactor == 0) ? svpack.param_bias : svpack.param_factor;            
            // do one round addup            
            for( int k = ucparam.min_kfactor; k < ucparam.max_kfactor; k ++ ){
                // switch to the factor parameter set once past the bias term
                if( k == 1 ) svparam = svpack.param_factor;
                {// make cuts
                    std::vector<float> begin_value, weight;
                    // mean weight: regularize toward the user's current static weight
                    if( k != 0 ) {
                        svparam.mean_weight = model.W_user[ uid ][ k - 1 ]; 
                    }else{
                        svparam.mean_weight = 0.0f;
                    }
                    this->make_grad_stats( data, k );
                    this->build_cuts( begin_value, weight, k );
                    this->try_add_cut( buf, uid, k, begin_value, weight );
                }
            }

            // register all newly created cut entries on the user's linked list
            for( size_t i = 0; i < buf.size(); i ++ ){
                ucmodel.push_hdata( uid, buf[i] );
            }
        }
        
        // add ucut, merge the new model into old ones: detach the user's existing
        // cut entries, learn fresh cuts per dimension, merge where a matching
        // kfactor entry exists, then re-register everything
        inline void update_merge_ucuts( const SVDPlusBlock & data ){
            std::vector<UCutModel::InfoEntry> vec;

            // all records in a block share the same user id
            const unsigned uid = data.data[0].index_ufactor[0];
            // detach the user's existing entries into vec; they are re-added
            // (possibly merged with new cuts) below
            for( int pid = uid; pid != -1; ){                   
                apex_utils::assert_true( ucmodel.hdata[ pid ].num_cut != 0, "zero cuts do not need to be here" );
                vec.push_back( ucmodel.hdata[ pid ] ); 
                {
                    int nx = ucmodel.hdata[ pid ].next; 
                    ucmodel.mark_delete_hdata( pid ); pid = nx;
                }
            }
            
            // before the first round there must not be any stored cuts
            apex_utils::assert_true( vec.size() == 0 || nround != 0, "BUGA" );
            
            std::vector<UCutModel::InfoEntry> buf;
            // svpack: bias parameter set for kfactor 0, factor set otherwise
            svparam = (ucparam.min_kfactor == 0) ? svpack.param_bias : svpack.param_factor;
            // do one round addup
            for( int k = ucparam.min_kfactor ; k < ucparam.max_kfactor; k ++ ){
                // switch to the factor parameter set once past the bias term
                if( k == 1 ) svparam = svpack.param_factor;                
                {// make cuts
                    std::vector<float> begin_value, weight;
                    // mean weight: regularize toward the user's current static weight
                    if( k != 0 ) {
                        svparam.mean_weight = model.W_user[ uid ][ k - 1 ]; 
                    }else{
                        svparam.mean_weight = 0.0f;
                    }
                    
                    this->make_grad_stats( data, k );
                    this->build_cuts( begin_value, weight, k );
                                        
                    // push_hdata inserts at the list front, and entries are pushed in
                    // ascending k, so vec is in descending k order: vec.back() holds
                    // the smallest remaining kfactor
                    if( vec.size() > 0 && ucmodel.kfactor( vec.back() ) == k ){
                        this->try_merge_cut( buf, uid, k, begin_value, weight, vec.back() );                        
                        vec.pop_back();
                    }else{                        
                        this->try_add_cut( buf, uid, k, begin_value, weight );
                    }
                }
            }
            apex_utils::assert_true( vec.size() == 0, "must have merge up all datas" );
            // push in new nodes
            for( size_t i = 0; i < buf.size(); i ++ ){
                ucmodel.push_hdata( uid, buf[i] );
            }
        }
    private:
        // fill buf_ufactor and buf_ubias for each record of the block; the buffers store
        // the effect of the user time-dependent parts on factor and bias respectively
        inline void prepare_ucuts( const SVDPlusBlock &data ){
            tdata.resize( 0 );
            apex_utils::assert_true( data.data.num_row != 0, "must have at least one record in each block" );
            if( data.data.num_row > ucparam.max_buf_size ){
                fprintf( stderr, "num_row=%d, max_buf_size=%d\n", data.data.num_row, ucparam.max_buf_size );
                apex_utils::assert_true( data.data.num_row <= ucparam.max_buf_size, "buffer size exceeded" );
            }
            // collect the (optionally bucketized) timestamp of every record and
            // zero the per-record buffers; the bucket switch is loop-invariant
            const bool use_bucket = ( ucparam.bucket_size != 0 );
            for( int i = 0; i < data.data.num_row; ++ i ){
                SVDFeatureCSR::Elem e = data.data[i];
                apex_utils::assert_true( e.num_global != 0, "at least one global feature for tstamp" );
                if( use_bucket ){
                    const int tstamp = static_cast<int>( e.value_global[0] );
                    tdata.push_back( TEntry( tstamp / ucparam.bucket_size, i ) );
                }else{
                    tdata.push_back( TEntry( e.value_global[0], i ) );
                }
                buf_ufactor[ i ] = 0.0f;
                buf_ubias[ i ] = 0.0f;
            }
            // order records by (bucketized) timestamp
            std::sort( tdata.begin(), tdata.end() );
            // walk the cut list of this user and accumulate its effect into the buffers
            const unsigned uid = data.data[0].index_ufactor[0];
            int pid = static_cast<int>( uid );
            while( pid != -1 ){
                // optionally normalize the cutting entries before merging
                if( ucparam.norm_mean != 0 ) this->merge_mean_normalize( ucmodel.hdata[ pid ], uid );
                // merge-sort style adding of the entry's contribution
                this->merge_add( ucmodel.hdata[ pid ] );
                pid = ucmodel.hdata[ pid ].next;
            }
        }
        // drop the leading global feature (the timestamp slot) from a feature row copy;
        // sanity-checks that the first user/item factor values are exactly 1.0
        inline static SVDFeatureCSR::Elem strip_extra_info( SVDFeatureCSR::Elem e ){
            apex_utils::assert_true( fabsf( e.value_ufactor[0] - 1.0f ) < 1e-6f, "BUGA");
            apex_utils::assert_true( fabsf( e.value_ifactor[0] - 1.0f ) < 1e-6f, "BUGB");
            e.index_global += 1; e.value_global += 1; e.num_global -= 1;
            return e;
        }
    private:
        // accumulate the item latent factor of a feature row into tmp_ifactor,
        // truncated to the buffer width, including attached extra item features
        inline void prepare_ifactor( CTensor1D tmp_ifactor, const SVDFeatureCSR::Elem &feature ){
            tmp_ifactor = 0.0f;
            const int nfac = feature.num_ifactor;
            for( int idx = 0; idx < nfac; ++ idx ){
                const unsigned iid  = feature.index_ifactor[ idx ];
                const float    ival = feature.value_ifactor[ idx ];
                // base item factor, clipped to the width of the buffer
                tmp_ifactor += model.W_item[ iid ].sub_area( 0, tmp_ifactor.x_max ) * ival;
                // extra features linked to this item contribute with their own weight
                SparseFeatureArray<float>::Vector extra = feat_item[ iid ];
                for( int j = 0; j < extra.size(); ++ j ){
                    tmp_ifactor += model.W_item[ extra[j].index ].sub_area( 0, tmp_ifactor.x_max ) * extra[j].value * ival;
                }
            }
        }
    protected:        
        // per-user update: one pass over all records of a single user's block
        virtual void update_each( const SVDPlusBlock &data ){
            apex_utils::assert_true( data.extend_tag == svdpp_tag::DEFAULT, "all user data must be in one block" );
            // prepare the time-dependent buffers for this block
            this->prepare_ucuts( data );

            // Gradient boosting update of the time-dependent cuts.
            // NOTE: the original code evaluated update_ucut(nround) in two consecutive
            // identical ifs; they are merged here since the predicate only inspects
            // the round schedule.
            if( ucparam.update_ucut( nround ) ){
                // forward prediction: buffer the current prediction energy of each record
                tmp_pred.resize( data.data.num_row );
                for( int i = 0; i < data.data.num_row; i ++ ){
                    this->curr_rindex = i;
                    tmp_pred[i] = this->pred_energy( strip_extra_info( data.data[i] ) );
                }
                // buffer the item latent factor of each record
                for( int i = 0; i < data.data.num_row; i ++ ){
                    this->prepare_ifactor( buf_ifactor[i], data.data[i] );
                }
                // the logic of bootstrap_ucuts and update_merge_ucuts is basically the same;
                //!! NOTE: most of the code maintains the data structure instead of performing the learning algorithm
                const unsigned uid = data.data[0].index_ufactor[0];
                if( ucmodel.hdata[ uid ].num_cut == 0 ){
                    this->bootstrap_ucuts( data );
                }else{
                    this->update_merge_ucuts( data );
                }
            }

            // normal SVDFeature update, so the code supports the feature-based matrix
            // factorization as its baseline
            if( ucparam.update_normal( nround ) ){
                for( int i = 0; i < data.data.num_row; i ++ ){
                    this->curr_rindex = i;
                    this->update_inner( strip_extra_info( data.data[i] ) );
                }
            }
        }
        // overlay the buffered time-dependent user factor of the current instance
        // on top of the base user latent factor
        virtual void prepare_svdpp( CTensor1D &tmp_ufactor ){
            SVDPPFeature::prepare_svdpp( tmp_ufactor );
            CTensor1D head;
            if( buf_ufactor.x_max > 0 ){
                // only the first buf_ufactor.x_max entries are time-dependent
                head = tmp_ufactor.sub_area( 0, buf_ufactor.x_max );
                head += buf_ufactor[ curr_rindex ];
            }
        }
        // bias term: base svdpp bias plus the buffered time-dependent user bias
        virtual float get_bias_svdpp( void ){
            const float base_bias = SVDPPFeature::get_bias_svdpp();
            return base_bias + buf_ubias[ curr_rindex ];
        }
    public:
        // forward a configuration entry to the base solver, the ucut parameters
        // and the schedule parameter pack; each consumer picks the keys it knows
        virtual void set_param( const char *name, const char *val ){
            SVDPPFeature::set_param( name, val );
            ucparam.set_param( name, val );
            svpack.set_param( name, val );
        }
        // load the base model; if the extension section is present (extend_flag != 0)
        // load the ucut model too, otherwise initialize a fresh one and mark the flag
        virtual void load_model( FILE *fi ) {
            SVDPPFeature::load_model( fi );
            if( model.param.extend_flag != 0 ){
                ucmodel.load_model( fi );
            }else{
                // legacy model file without the ucut extension
                ucmodel.init_model();
                model.param.extend_flag = 2;
            }
        }
        // save the base model, then the ucut model; the cut storage is
        // defragmented first so deleted entries are not serialized
        virtual void save_model( FILE *fo ) {
            SVDPPFeature::save_model( fo );
            ucmodel.defragment( nround );
            ucmodel.save_model( fo );
        }
        // create a fresh model: one cut-list root per user group, and mark the
        // model file as carrying the ucut extension section
        virtual void init_model( void ){
            ucmodel.param.num_roots = ucparam.num_ugroup;
            model.param.extend_flag = 2;
            SVDPPFeature::init_model();
            ucmodel.init_model();
        }
        // allocate the per-record buffers and clip the cut factor range to
        // what the base model actually provides
        virtual void init_trainer( void ){
            SVDPPFeature::init_trainer();
            // never cut more factor dimensions than the base model has
            if( model.param.num_factor + 1 < ucparam.max_kfactor ){
                ucparam.max_kfactor = model.param.num_factor + 1;
            }
            // skip the bias dimension when the base model disables user bias
            if( model.param.no_user_bias > ucparam.min_kfactor ){
                ucparam.min_kfactor = model.param.no_user_bias;
            }
            // frugal mode only buffers the factor dimensions the cuts may touch
            const int factor_width = ( ucparam.frugal_mode == 0 ) ? model.param.num_factor : ucparam.max_kfactor;
            buf_ufactor.set_param( ucparam.max_buf_size, factor_width );

            buf_ubias.set_param( ucparam.max_buf_size );
            tensor::alloc_space( buf_ubias );
            buf_ubias   = 0.0f;
            tensor::alloc_space( buf_ufactor );
            buf_ufactor = 0.0f;
            buf_ifactor = clone( buf_ufactor );
        }
        // predict every record of a user block; fills p with one score per record
        virtual void predict( std::vector<float> &p, const SVDPlusBlock &data ){
            apex_utils::assert_true( data.extend_tag == svdpp_tag::DEFAULT, "all user data must be in one block" );
            this->prepare_ufeedback( data );
            // prepare the time-dependent buffers before scoring
            this->prepare_ucuts( data );

            p.clear();
            const int nrow = data.data.num_row;
            for( int i = 0; i < nrow; ++ i ){
                this->curr_rindex = i;
                p.push_back( this->pred( strip_extra_info( data.data[i] ) ) );
            }
        }
        // record the current training round; the ucut schedule predicates use it
        virtual void set_round( int nround ){
            SVDPPFeature::set_round( nround );
            this->nround = nround;
        }
    };
};

#endif
