#include "ML_DecisionStump.h"
#include "ML_DecisionStumpParams.h"
#include "ML_SubclassFolder.h"
#include "ML_DecisionStumpPreprocess.h"
#include "../Common/ErrorCode.h"
#include "../Tools/ZLogManager.h"
#include "../Math/MathTools.h"
#include "../Common/ZException.h"

GLOBAL_NAMESPACE_BEGIN
NAMESPACE_MACHINE_LEARNING_BEGIN

namespace
{
    // Returns true when `value` occurs in `*vec`.
    // Preconditions: `vec` is non-null and sorted in ascending order
    // (required by std::binary_search; unsorted input gives wrong answers).
    // Note: `static` dropped — the anonymous namespace already gives
    // internal linkage.
    bool isValueInVector(int value, const std::vector<int>* vec)
    {
        return std::binary_search(vec->begin(), vec->end(), value);
    }
}

// Pimpl body of MLDecisionStump. A decision stump is a one-feature
// threshold classifier used as a weak learner: classes inside the chosen
// sub-class set respond with `a` (test passes) / `b` (test fails); every
// other class c responds with the constant k[c].
class MLDecisionStumpImpl
{
public:
    MLDecisionStumpImpl();
    MLDecisionStumpImpl(const MLDecisionStump& rhs);
    ~MLDecisionStumpImpl();
    friend class MLDecisionStump;

    // Detaches the (non-owning) collaborators and clears class selection.
    void reset();
    // Validates that preprocess data has been attached; returns an ErrorCode.
    int  doPreprocess();
    // Fits the constant responses k[c] for classes outside stump->subClass.
    int  trainStump(MLDecisionStumpParams* stump, const Eigen::MatrixXd& trainData, const Eigen::MatrixXi& labels, const Eigen::MatrixXd& weights);
    // Searches all (sub-class set, dimension, threshold, direction)
    // combinations for the minimum weighted-error stump.
    // (Parameter name fixed: was the typo "trianData".)
    int  buildStump(const Eigen::MatrixXd& trainData, const Eigen::MatrixXi& labels, const Eigen::MatrixXd& weights);
    // Stump response for row dataIdx of data (resp. the single sample
    // oneData) with respect to class classIdx.
    double evaluate(const Eigen::MatrixXd& data, int dataIdx, int classIdx);
    double evaluate(const Eigen::RowVectorXd& oneData, int classIdx);

    // Weighted squared error of the currently trained stump (param_).
    double evalError(const Eigen::MatrixXd& trainData, const Eigen::MatrixXi& labels, const Eigen::MatrixXd& weights);

    // Non-owning collaborator setters; callers retain ownership and must
    // keep the objects alive for the lifetime of this impl.
    void setSubClassCandidate(const MLSubClassFolder* subClasses);
    void setPreprocessData(MLDecisionStumpPreprocess* preProcess);

    // Human-readable dump of the trained parameters.
    void output(std::ostream& out);
    const MLDecisionStumpParams* getParameters() const;

    // Installs externally supplied parameters without training.
    void setStumpParams(const MLDecisionStumpParams& param);

private:
    void clone(const MLDecisionStump& rhs);
    void generateThresholdCandidate();
    // Error of an explicit stump configuration (shared by the public overload).
    double evalError(MLDecisionStumpParams* stump, const Eigen::MatrixXd& trainData, const Eigen::MatrixXi& labels, const Eigen::MatrixXd& weights);

private:
    MLDecisionStumpParams param_;           // trained stump parameters
    MLDecisionStumpPreprocess *preProcess_; // non-owning

    const MLSubClassFolder *subClasses_;    // non-owning candidate sub-class sets
    int classIdx_;                          // index of the winning sub-class set (-1 until trained)
    int diffClass_;

    //const std::vector< std::vector<int> >* subClassCandidate_;

    // Currently unused: generateThresholdCandidate() is not implemented.
    std::vector<double> thresholdCandidates_;
};

// Default-constructs with no collaborators attached. All pointers are
// non-owning.
// BUGFIX: diffClass_ was previously left uninitialized (it is read by
// clone() when copying); it now starts at -1, matching classIdx_'s
// "nothing selected" convention. Initializer order also now matches the
// member declaration order (avoids -Wreorder).
MLDecisionStumpImpl::MLDecisionStumpImpl()
    : preProcess_(NULL)
    , subClasses_(NULL)
    , classIdx_(-1)
    , diffClass_(-1)
{
}

// Copy-constructs from the public wrapper: clone() assigns all scalar and
// pointer members from rhs.impl_; thresholdCandidates_ stays default (empty).
MLDecisionStumpImpl::MLDecisionStumpImpl(const MLDecisionStump& rhs)
{
    clone(rhs);
}

// Nothing to release: preProcess_ and subClasses_ are non-owning pointers.
MLDecisionStumpImpl::~MLDecisionStumpImpl()
{
}

// Copies trained parameters and the (non-owning) collaborator pointers from
// rhs's impl — both objects then share the same preprocess/sub-class data.
// NOTE(review): thresholdCandidates_ is not copied; harmless while
// generateThresholdCandidate() is unimplemented, but confirm if that changes.
void MLDecisionStumpImpl::clone(const MLDecisionStump& rhs)
{
    param_      = rhs.impl_->param_;
    classIdx_   = rhs.impl_->classIdx_;
    diffClass_  = rhs.impl_->diffClass_;
    subClasses_ = rhs.impl_->subClasses_;
    preProcess_ = rhs.impl_->preProcess_;
}

void MLDecisionStumpImpl::reset()
{
    subClasses_ = NULL;
    classIdx_ = -1;
    preProcess_ = NULL;
}

// Read-only access to the trained stump parameters (meaningful after
// buildStump()/trainStump() or setStumpParams()).
const MLDecisionStumpParams* MLDecisionStumpImpl::getParameters() const
{
    return &param_;
}

// Attaches the candidate sub-class sets searched by buildStump().
// Non-owning: the caller keeps ownership and must outlive this object.
void MLDecisionStumpImpl::setSubClassCandidate(const MLSubClassFolder* subClasses)
{
    subClasses_ = subClasses;
}

// Attaches the shared preprocess data (per-class/dimension/threshold stump
// statistics). Non-owning: the caller keeps ownership.
void MLDecisionStumpImpl::setPreprocessData(MLDecisionStumpPreprocess* preProcess)
{
    preProcess_ = preProcess;
}

int MLDecisionStumpImpl::doPreprocess()
{
    int ret = ErrorCode::ERROR_NO_Error;
    if (!preProcess_)
        return ErrorCode::ERROR_Invalid_Parameter;
    return ret;
}

// Fits the constant responses k[c] of the stump: for every class outside
// stump->subClass, k[c] is the weighted mean of the +/-1 label responses
// (0 when the total weight vanishes). Classes inside the sub-class set are
// handled by the threshold test (a/b) and keep k[c] == 0.
int MLDecisionStumpImpl::trainStump(MLDecisionStumpParams* stump, const Eigen::MatrixXd& trainData, const Eigen::MatrixXi& labels, const Eigen::MatrixXd& weights)
{
    const int nSamples = trainData.rows();
    const int nClasses = preProcess_->getClassSize();

    // Only the constant responses need computing here; the threshold part
    // of the stump has already been chosen.
    stump->k.resize(nClasses, 0);
    for (int c = 0; c < nClasses; c++)
    {
        if (isValueInVector(c, &stump->subClass))
            continue;  // in-set classes use a/b, not k

        double weightedResponseSum = 0;
        double weightTotal = 0;
        for (int s = 0; s < nSamples; s++)
        {
            const double w = weights(s, c);
            const int response = (labels(s, c) == 1) ? 1 : -1;
            weightedResponseSum += w * response;
            weightTotal += w;
        }
        stump->k[c] = MathTools::isZero(weightTotal) ? 0 : weightedResponseSum / weightTotal;
    }
    return ErrorCode::ERROR_NO_Error;
}

// Exhaustive search for the best stump: for every candidate sub-class set,
// feature dimension, threshold index and comparison direction, computes the
// weighted squared error and stores the minimizer in param_/classIdx_.
// Returns ERROR_Invalid_Parameter on empty or mismatched inputs.
int MLDecisionStumpImpl::buildStump(const Eigen::MatrixXd& trainData, const Eigen::MatrixXi& labels, const Eigen::MatrixXd& weights)
{
    int nDataSize = trainData.rows();
    int nDataDim = trainData.cols();
    if (nDataSize == 0 || nDataDim == 0)
    {
        DebugLog << "No data input";
        return ErrorCode::ERROR_Invalid_Parameter;
    }
    if (labels.rows() != nDataSize)
    {
        DebugLog << "Label and data dimensions are not the same";
        return ErrorCode::ERROR_Invalid_Parameter;
    }

    double minError = DBL_MAX;
    int    bestDim = -1;
    bool   bestGreater = false;
    double bestThreshold = 0;
    int    bestThresholdIdx = -1;
    int    bestSubClassIdx = -1;
    double bestAs = 0;
    double bestBs = 0;

    for (int cs = 0; cs < subClasses_->getSubClassSize(); cs++)
    {
        for (int d = 0; d < preProcess_->getDimensionSize(); d++)
        {
            for (int i = 0; i < preProcess_->getThresholdSize(); i++)
            {
                bool bGreater = true;
                for (int g = 0; g < 2; g++, bGreater = !bGreater)
                {
                    // BUGFIX: these accumulators were declared outside the
                    // direction loop, so the g==1 (less-than) pass kept the
                    // sums of the g==0 (greater-than) pass and its error was
                    // double-counted. Each direction must be scored from a
                    // clean slate, exactly as evalError() scores a single
                    // configuration.
                    double wcP = 0, wcN = 0, wP = 0, wN = 0;
                    double wzk = 0;
                    double threshold = 0;
                    for (int c = 0; c < preProcess_->getClassSize(); c++)
                    {
                        MLDecisionStumpParams* stump = preProcess_->getStump(c, d, i, bGreater);
                        threshold = stump->threshold;
                        if (isValueInVector(c, subClasses_->getSubClass(cs)))
                        {
                            // In-set classes: use the precomputed
                            // positive/negative weight sums.
                            wcP += stump->a*stump->wcP;
                            wP  += stump->wcP;
                            wcN += stump->b*stump->wcN;
                            wN  += stump->wcN;
                        }
                        else
                        {
                            // Out-of-set classes: weighted squared deviation
                            // from the constant response k[c].
                            // (loop index changed from size_t to int to avoid
                            // a signed/unsigned comparison with nDataSize)
                            for (int j = 0; j < nDataSize; j++)
                            {
                                int pred = labels(j, c) == 1 ? 1 : -1;
                                double w = weights(j, c);
                                wzk += w*(pred - stump->k[c])*(pred - stump->k[c]);
                            }
                        }
                    }
                    double as = MathTools::isZero(wP) ? 0 : wcP / wP;
                    double bs = MathTools::isZero(wN) ? 0 : wcN / wN;
                    double error = (1 - as*as)*wP + (1 - bs*bs)*wN + wzk;
                    if (error < minError)
                    {
                        minError = error;
                        bestSubClassIdx = cs;
                        bestDim = d;
                        bestThreshold = threshold;
                        bestThresholdIdx = i;
                        bestAs = as;
                        bestBs = bs;
                        bestGreater = bGreater;
                    }
                }
            }
        }
    }

    assert(bestDim >= 0);
    classIdx_ = bestSubClassIdx;
    param_.subClass     = *subClasses_->getSubClass(bestSubClassIdx);
    param_.dimF         = bestDim;
    param_.bGreater     = bestGreater;
    param_.threshold    = bestThreshold;
    param_.threIdx      = bestThresholdIdx;
    param_.a = bestAs;
    param_.b = bestBs;

    return ErrorCode::ERROR_NO_Error;
}

// Weighted squared error of one stump configuration (thisStump) on the data:
//   (1 - a^2)*wP + (1 - b^2)*wN  for classes inside the sub-class set, plus
//   sum of w*(pred - k[c])^2     for classes outside it.
// Mirrors the per-configuration scoring done inside buildStump().
// (Removed unused locals nDataDim/threshold; loop index is int to avoid a
// signed/unsigned comparison.)
double MLDecisionStumpImpl::evalError(MLDecisionStumpParams* thisStump, const Eigen::MatrixXd& trainData, const Eigen::MatrixXi& labels, const Eigen::MatrixXd& weights)
{
    int nDataSize = trainData.rows();

    double wcP = 0, wcN = 0, wP = 0, wN = 0;
    double wzk = 0;
    for (int c = 0; c < preProcess_->getClassSize(); c++)
    {
        MLDecisionStumpParams* stump = preProcess_->getStump(c, thisStump->dimF, thisStump->threIdx, thisStump->bGreater);
        if (isValueInVector(c, &thisStump->subClass))
        {
            // In-set classes: precomputed positive/negative weight sums.
            wcP += stump->a*stump->wcP;
            wP += stump->wcP;
            wcN += stump->b*stump->wcN;
            wN += stump->wcN;
        }
        else
        {
            // Out-of-set classes: weighted squared deviation from k[c].
            for (int j = 0; j < nDataSize; j++)
            {
                int pred = labels(j, c) == 1 ? 1 : -1;
                double w = weights(j, c);
                wzk += w*(pred - stump->k[c])*(pred - stump->k[c]);
            }
        }
    }
    double as = MathTools::isZero(wP) ? 0 : wcP / wP;
    double bs = MathTools::isZero(wN) ? 0 : wcN / wN;
    return (1 - as*as)*wP + (1 - bs*bs)*wN + wzk;
}

// Convenience overload: error of the currently trained parameters (param_).
double MLDecisionStumpImpl::evalError(const Eigen::MatrixXd& trainData, const Eigen::MatrixXi& labels, const Eigen::MatrixXd& weights)
{
    return evalError(&param_, trainData, labels, weights);
}

// Stump response for row dataIdx of data with respect to class classIdx.
// In-set classes get a/b from the threshold test (a value exactly equal to
// the threshold fails the test in either direction and yields b); all other
// classes get their constant response k[classIdx].
double MLDecisionStumpImpl::evaluate(const Eigen::MatrixXd& data, int dataIdx, int classIdx)
{
    if (!isValueInVector(classIdx, &param_.subClass))
        return param_.k[classIdx];

    const double feature = data(dataIdx, param_.dimF);
    const bool passes = param_.bGreater ? (feature > param_.threshold)
                                        : (feature < param_.threshold);
    return passes ? param_.a : param_.b;
}

// Stump response of a single sample for class classIdx. Same contract as
// the matrix overload: in-set classes get a/b from the threshold test
// (equality yields b), other classes get the constant k[classIdx].
double MLDecisionStumpImpl::evaluate(const Eigen::RowVectorXd& oneData, int classIdx)
{
    if (!isValueInVector(classIdx, &param_.subClass))
        return param_.k[classIdx];

    const double feature = oneData(param_.dimF);
    const bool passes = param_.bGreater ? (feature > param_.threshold)
                                        : (feature < param_.threshold);
    return passes ? param_.a : param_.b;
}

// Placeholder: threshold-candidate generation (thresholdCandidates_) is not
// implemented yet; always throws ERR_NOT_IMPLEMENTED.
void MLDecisionStumpImpl::generateThresholdCandidate()
{
    ZFW_EXCEPTION(Exception::ERR_NOT_IMPLEMENTED, "function not implemented", "MLDecisionStumpImpl::generateThresholdCandidate()");
}

// Writes a human-readable dump of the trained parameters to `out`:
// sub-class ids, feature dimension, threshold, direction flag, the a/b
// responses, and the per-class constants k.
void MLDecisionStumpImpl::output(std::ostream& out)
{
    out << " subclasses: ";
    for (size_t i = 0; i < param_.subClass.size(); ++i)
        out << param_.subClass[i] << " ";
    out << "\n";
    out << " featureDim: " << param_.dimF << "\n";
    out << " threshold:  " << param_.threshold << "\n";
    out << " greater:    " << param_.bGreater << "\n";
    out << " parameters: " << param_.a << " " << param_.b << "\n";
    out << " parameters: ";
    for (size_t i = 0; i < param_.k.size(); ++i)
        out << param_.k[i] << " ";
}

// Overwrites the stump parameters with an externally supplied set,
// bypassing training.
void MLDecisionStumpImpl::setStumpParams(const MLDecisionStumpParams& params)
{
    param_ = params;
}

//////////////////////////////////////////////////////////////////////////////
///

// Constructs the public wrapper with a fresh, empty implementation object.
MLDecisionStump::MLDecisionStump()
    : impl_(new MLDecisionStumpImpl)
{
}

// Copy-constructs with a new impl that clones rhs's state
// (collaborator pointers are shared, not deep-copied).
MLDecisionStump::MLDecisionStump(const MLDecisionStump& rhs)
    : impl_(new MLDecisionStumpImpl(rhs))
{
    
}

// NOTE(review): empty destructor — impl_'s declaration is in the header
// (not visible here). If impl_ is a raw pointer, the MLDecisionStumpImpl
// allocated in the constructors leaks and the copy constructor would also
// need a matching assignment operator (rule of three); fine if impl_ is a
// smart pointer — please confirm.
MLDecisionStump::~MLDecisionStump()
{
}

// Detaches collaborators and clears the learned class selection
// (forwards to MLDecisionStumpImpl::reset).
void MLDecisionStump::reset()
{
    impl_->reset();
}

// Trains the stump in three stages: validate the preprocess data, search
// for the best (sub-class, dimension, threshold, direction) configuration,
// then fit the constant responses for the remaining classes. Stops at the
// first stage that reports an error and returns its ErrorCode.
int     MLDecisionStump::train    (const Eigen::MatrixXd& trainData, const Eigen::MatrixXi& labels, const Eigen::MatrixXd& weights)
{
    int status = impl_->doPreprocess();
    if (status == ErrorCode::ERROR_NO_Error)
        status = impl_->buildStump(trainData, labels, weights);
    if (status == ErrorCode::ERROR_NO_Error)
        status = impl_->trainStump(&impl_->param_, trainData, labels, weights);
    return status;
}

// Stump response of a single sample for class classIdx (forwards to impl).
double  MLDecisionStump::evaluate (const Eigen::RowVectorXd& oneData, int classIdx) const
{
    return impl_->evaluate(oneData, classIdx);
}

// Stump response for row dataIdx of data for class classIdx (forwards to impl).
double  MLDecisionStump::evaluate (const Eigen::MatrixXd& data, int dataIdx, int classIdx) const
{
    return impl_->evaluate(data, dataIdx, classIdx);
}

// Per-class responses for a single sample.
// BUGFIX: `ret` was default-constructed with size 0, so every `ret(c)`
// write was out of bounds; it is now sized to the class count up front.
Eigen::RowVectorXi MLDecisionStump::predict(const Eigen::RowVectorXd& oneData) const
{
    const int nClasses = impl_->preProcess_->getClassSize();
    Eigen::RowVectorXi ret(nClasses);
    for (int c = 0; c < nClasses; c++)
    {
        // NOTE(review): evaluate() returns a real-valued score, so storing
        // it in an integer vector truncates toward zero (the original,
        // implicit behavior — made explicit here). Confirm whether a
        // sign/threshold decision was intended instead.
        ret(c) = static_cast<int>(impl_->evaluate(oneData, c));
    }
    return ret;
}

// Attaches the candidate sub-class sets (non-owning; caller keeps
// ownership). Routed through the impl setter for consistency with
// setPreprocessData() instead of poking impl_->subClasses_ directly.
void MLDecisionStump::setSubClassFolder(MLSubClassFolder* subClasses)
{
    impl_->setSubClassCandidate(subClasses);
}

// Attaches the shared preprocess data (non-owning; caller keeps ownership
// and must keep it alive while this stump is used).
void MLDecisionStump::setPreprocessData(MLDecisionStumpPreprocess* preProcess)
{
    impl_->setPreprocessData(preProcess);
}

// Weighted squared error of the trained stump on the given data set
// (forwards to impl).
double MLDecisionStump::evalError(const Eigen::MatrixXd& data, const Eigen::MatrixXi& labels, const Eigen::MatrixXd& weights) const
{
    return impl_->evalError(data, labels, weights);
}

// Index of the sub-class set chosen by the last successful train()/
// buildStump() call; -1 before training.
int MLDecisionStump::getTheBestClassIdx()
{
    return impl_->classIdx_;
}

// Maps a stored label entry to the +/-1 response convention used by the
// stump: label value 1 -> +1, anything else -> -1.
int MLDecisionStump::getLabelResponse(const Eigen::RowVectorXi& oneLabel, int classIdx) const
{
    if (oneLabel(classIdx) == 1)
        return 1;
    return -1;
}

// Writes a human-readable dump of the trained parameters (forwards to impl).
void MLDecisionStump::output(std::ostream& out)
{
    impl_->output(out);
}

// Read-only access to the trained stump parameters (forwards to impl).
const MLDecisionStumpParams* MLDecisionStump::getParameters() const
{
    return impl_->getParameters();
}

// Installs externally supplied stump parameters without training
// (despite the name, this performs no search — it forwards to
// setStumpParams on the impl).
void MLDecisionStump::buildStump(const MLDecisionStumpParams& params)
{
    impl_->setStumpParams(params);
}

NAMESPACE_MACHINE_LEARNING_END
GLOBAL_NAMESPACE_END
