#include <QDebug>

// Learning hyper-parameters, one declaration per line.
const double ALPHA = 0.8;   // passed to LearningData (presumably a learning rate — confirm)
const double BETA  = 0.8;   // passed to LearningData (presumably a learning rate — confirm)
const double GAMMA = 0.1;   // discount factor applied to the next state's value
const double SIGMA = 0.57;  // initial std-dev of the gaussian exploration noise

template <int n>
Learner<n>::Learner(MainWindow *w):QThread(), mainWindow(w), ran(false)
{
    // Writes a configuration header (system type + parameter lists) to the
    // output file, then builds and configures the simulated system.
    QFile file(mainWindow->filename);
    if(!file.open(QIODevice::WriteOnly | QIODevice::Text))
        qDebug() << "File not open : " << mainWindow->filename << endl;
    // NOTE(review): on open failure the writes below go to a closed device
    // and are silently dropped — confirm this is acceptable.
    QTextStream out(&file);

    mainWindow->readInput();

    int type = mainWindow->type;

    out<<type<<"\n";

    // Physical, motor and sensor parameter sets entered in the UI.
    QList<double> p = mainWindow->physParameters;
    QList<double> m = mainWindow->motParameters;
    QList<double> s = mainWindow->senParameters;

    // Dump each parameter list colon-separated, one list per line.
    for(int i=0; i < p.size(); i++)
        out<<p[i]<<":";
    out << endl;
    for(int i=0; i < m.size(); i++)
        out<<m[i]<<":";
    out << endl;
    for(int i=0; i < s.size(); i++)
        out<<s[i]<<":";
    out << endl;


    // Build the system matching `type`. dimPos is the number of trailing
    // state entries that hold the position/objective coordinates.
    switch(type)
    {
    case 0:
        sys = new OneServo;
        sys->addSensor(s[0], s[1], s[2]);
        sys->addPositionSensor(s[0], s[1], s[2]);
        dimPos = 1;
        break;
    case 1:
        sys = new OneWheel;
        sys->addSensor(s[0], s[1], s[2]);
        sys->addPositionSensor(s[0], s[1], s[2]);
        dimPos = 1;
        break;
    case 2:
        // Two-actuator systems register one motor here; combined with the
        // shared addMotor call after the switch they presumably end up with
        // two motors while One* systems get one — TODO confirm.
        sys = new TwoServos;
        sys->addMotor(m[0], m[1], m[2], m[3]);
        sys->addSensor(s[0], s[1], s[2]);
        sys->addSensor(s[0], s[1], s[2]);
        sys->addPositionSensor(s[0], s[1], s[2]);
        dimPos = 2;
        break;
    default:
        sys = new TwoWheels;
        sys->addMotor(m[0], m[1], m[2], m[3]);
        sys->addSensor(s[0], s[1], s[2]);
        sys->addSensor(s[0], s[1], s[2]);
        sys->addPositionSensor(s[0], s[1], s[2]);
        dimPos = 3;
        break;
    }
    sys->setParameters(p);
    // Motor shared by every system type (see note in the switch above).
    sys->addMotor(m[0], m[1], m[2], m[3]);
    // Start with a neutral single-value consign.
    sys->setConsign(QList<double>() << 0);
}
template <int n>
Learner<n>::~Learner()
{
    // run() has no event loop, so quit() alone returns immediately and does
    // not stop it; destroying a QThread that is still running is a crash.
    // Block until the worker has actually finished before tearing down.
    if(this->isRunning()){
        this->quit();
        this->wait();
    }
    // `sys` is allocated unconditionally in the constructor; deleting it
    // only when `ran` was true leaked it whenever run() never completed.
    delete sys;
}

template <int n>
inline double Learner<n>::reward(QList<double> params){
    // Squared distance between the objective (last dimPos entries) and the
    // current position (the dimPos entries immediately before them).
    const int last = params.size();
    double distSq = 0;
    for(int i = last - dimPos ; i < last ; i++){
        const double diff = params[i] - params[i - dimPos];
        distSq += diff * diff;
    }
    // Reward shaped as 1/(0.005 + d^4): peaks at 200 when the position
    // matches the objective and falls off quickly with distance.
    return 1 / (0.005 + distSq * distSq);
}

template <int n>
inline QList<double> Learner<n>::getState(QList<double> objective){
    // Full learning state: the system's raw state with the objective
    // coordinates appended at the end.
    QList<double> state = sys->state();
    state.append(objective);
    return state;
}

template <int n>
void Learner<n>::run()
{
    // Main learning loop: repeatedly run episodes on the least-visited cells,
    // then sample random cells and subdivide those whose freshly learned
    // value falls markedly below their stored value. Loops until no cell
    // gets subdivided in a full pass.
    mainWindow->setDisabled(true);
    // NOTE(review): the window is never re-enabled in this function —
    // confirm something else restores it after the thread finishes.

    srand(time(NULL));
    LearningData<n> learn( ALPHA , BETA ,4 , 1 , 1);
    /*for(int i = 0 ; i < learn.states.size() ; i++)
        learn.states[i].V = reward(learn.states[i].params);*/

    bool stillGoing = true;
    while(stillGoing){
        stillGoing = false;
        // Phase 1: drive every under-visited cell through an episode.
        Cell<n> *cell = learn.getLessVisited();
        while(cell != 0){
            episode(learn , cell);
            cell = learn.getLessVisited();
        }

        // Phase 2: sample k random points in [0,1]^n and collect their
        // (deduplicated, via std::set) cells for the subdivision test.
        QList<double> params;
        for(int j = 0 ; j < n ; j++)
            params << 0;
        int k = 5;
        std::set<Cell<n>*> Ck;
        for(int i = 0 ; i < k ; i++){
            for(int j = 0 ; j < n ; j++)
                params[j] = rand()/ ((double)RAND_MAX);
            Ck.insert(learn.getCell(params));
        }
        for(typename std::set<Cell<n>*>::iterator it = Ck.begin() ; it != Ck.end() ; it++){
            State<n> objective = (*it)->getBarycenter();

            // Objective = last dimPos coordinates of the cell's barycenter.
            QList<double> pbary;
            for(int i = objective.params.size()-dimPos ; i < objective.params.size() ; i++)
                pbary << objective.params[i];
            //Explore...

            QList<double> currentState = getState(pbary);
            QList<double> action = (learn.getCell(currentState)->getVals(currentState)).cons;

            //Explore...
            sys->setConsign(action);

            // Wait for the system to move, then read the new position.
            // NOTE(review): sleeps 0 ms here while episode() waits 100 ms —
            // the system likely has no time to react; confirm intent.
            QThread::msleep(0);
            QList<double> nextState = getState(pbary);

            double learnedValue = reward(nextState);
            // NOTE(review): queries the next-state cell with currentState
            // (same pattern as in episode()) — verify nextState was not
            // intended here.
            learnedValue += GAMMA*learn.getCell(nextState)->getVals(currentState).V;

            qDebug() << "Obj : " << pbary << " Action " << action;
            qDebug() << "LVB : " << learnedValue;
            qDebug() << "s.V : " << objective.V;



            // Subdivide when the learned value is below the stored one by
            // more than the relative tolerance eps.
            // NOTE(review): divides by objective.V — a zero V would divide
            // by zero (cf. the TODO in episode()).
            double eps = 0.3;
            double error = learnedValue/objective.V;
            if(error<1-eps){
                (*it)->subdivide();
                stillGoing = true;
            }
        }
    }

    qDebug() << "Apprentissage terminé , exportation ...";
    learn.exportData(mainWindow->filename);
    ran = true;
    qDebug() << "Fin";
}


template<int n>
void Learner<n>::episode(LearningData<n> &learn , Cell<n> *cell){ //Implement CACLA episode for oneServo
    // Runs one CACLA episode: starting from a random state inside `cell`,
    // repeatedly perturb the advised action with gaussian noise, apply it,
    // and feed the observed reward back into the cell. The episode ends after
    // N consecutive steps with a non-decreasing learned value.

    // Randomly choose a state s in the cell; its last dimPos coordinates
    // become the initial objective.
    QList<double> currentState = cell->getRandomParams();
    QList<double> objective ;
    for(int i = currentState.size()-dimPos ; i < currentState.size() ; i++)
        objective << currentState[i];
    sys->setInState(currentState);

    QList<double> cons = (cell->getVals(currentState)).cons;

    int count = 0;       // current streak of non-decreasing learned values
    double previousLV = 0;
    int N = 5;           // streak length required to end the episode

    qDebug() << "Enter episode";
    while(count < N){
        //Generate a in a gaussian neighborhood of A[currentState]
        currentState = getState(objective);
        cell = learn.getCell(currentState);
        QList<double> action = (cell->getVals(currentState)).cons;
        qDebug() <<"objective" << objective <<  "Action conseillée " << action;
        // NOTE(review): iterates over cons.size() while indexing action —
        // assumes both consign lists always have the same length; confirm.
        for(int i = 0 ; i < cons.size() ; i++){
            // Box-Muller transform: R is a standard gaussian sample; the
            // +1.0 offsets keep the arguments of log() away from 0.
            double dr1 = (rand()+1.0)/(RAND_MAX+1.0);
            double dr2 = (rand()+1.0)/(RAND_MAX+1.0);
            double R =  sqrt(-2*log(dr1)) * cos(2*M_PI*dr2);
            // Exploration noise shrinks as the success streak grows.
            double sig = SIGMA/(1.0+2*count) ;
            action[i] += sig*R;
            // Clamp each actuator command into [0, 1].
            if(action[i] < 0)
                action[i] = 0;
            else if(action[i] > 1)
                action[i] = 1;
        }

        qDebug() << "Action réalisée " << action;

        //Explore...
        sys->setConsign(action);

        // Give the system 100 ms to move, then read the new position.
        QThread::msleep(100);
        QList<double> nextState = getState(objective);

        // TD-style target: immediate reward plus discounted next value.
        double learnedValue = reward(nextState);
        Cell<n>* nCell = learn.getCell(nextState);
        // NOTE(review): queries the next-state cell with currentState —
        // confirm nextState was not intended here.
        learnedValue += GAMMA*nCell->getVals(currentState).V;

        //TODO: change, because sc has V=0, and because the test on V is done in setVals
        qDebug() << "LV : " << learnedValue;
        cell->setVals(learnedValue , action, currentState);
        if(learnedValue >= previousLV)
            count++;
        else
            count = 0;
        previousLV = learnedValue;
        learn.getCell(currentState)->print();

        // Draw a fresh random objective in [0,1]^dimPos for the next step.
        objective.clear();
        for(int i = 0 ; i < dimPos ; i++)
            objective << rand()/((double)RAND_MAX);
    }
    qDebug() << "Leave episode";
}


