package com.mygdx.game.dl4j;

import org.deeplearning4j.rl4j.learning.sync.qlearning.QLearning;
import org.deeplearning4j.rl4j.learning.sync.qlearning.discrete.QLearningDiscreteDense;
import org.deeplearning4j.rl4j.network.dqn.DQNFactoryStdDense;
import org.deeplearning4j.rl4j.policy.DQNPolicy;
import org.deeplearning4j.rl4j.space.Box;
import org.deeplearning4j.rl4j.space.DiscreteSpace;
import org.deeplearning4j.rl4j.util.DataManager;
import org.nd4j.linalg.learning.config.Adam;

import java.io.IOException;
import java.util.logging.Logger;


/**
 * DQN training and evaluation entry point for the car environment.
 *
 * <p>{@link #cartPole(CarMDP)} trains a dense DQN on the MDP and serializes the
 * resulting policy; {@link #loadCartpole(CarMDP)} deserializes that policy and
 * reports its average reward over repeated play-throughs.
 */
public class CarTrain
{

    /** Q-learning hyperparameters (seed, replay size, epsilon schedule, etc.). */
    public static final QLearning.QLConfiguration CARTPOLE_QL =
            new QLearning.QLConfiguration(
                    10086,    //Random seed
                    150,    //Max step By epoch
                    3000, //Max step
                    3000, //Max size of experience replay
                    500,     //size of batches
                    10,    //target update (hard)
                    1,     //num step noop warmup
                    0.1,   //reward scaling
                    0.99,   //gamma
                    1.0f,    //td-error clipping
                    0.1f,   //min epsilon
                    1000,   //num step for eps greedy anneal
                    true    //double DQN
            );

    /** Dense network architecture for the DQN: 3 hidden layers of 32 nodes, Adam, L2 reg. */
    public static final DQNFactoryStdDense.Configuration CARTPOLE_NET =
        DQNFactoryStdDense.Configuration.builder()
            .l2(0.001).updater(new Adam(0.0005)).numHiddenNodes(32).numLayer(3).build();

    // Single source of truth for the serialized-policy location, shared by save and load.
    // NOTE(review): this is the filesystem root — likely intended to be a project-relative
    // path; confirm write permissions on the deployment machine.
    private static final String POLICY_PATH = "/pol1.zip";

    public static void main(String[] args) throws IOException {
        CarMDP<Box, Integer, DiscreteSpace> mdp = new CarMDP<Box, Integer, DiscreteSpace>();
        // Training is disabled here; uncomment to retrain before evaluating.
//        cartPole(mdp);
        loadCartpole(mdp);
        mdp.close();
    }


    /**
     * Trains a DQN on the given MDP and saves the learned policy to {@code /pol1.zip}.
     *
     * @param mdp the environment to train on
     * @throws IOException if writing training data or the serialized policy fails
     */
    public static void cartPole(CarMDP<Box, Integer, DiscreteSpace> mdp ) throws IOException {

        //record the training data in rl4j-data in a new folder (save)
        DataManager manager = new DataManager(true);

        //define the training (diamond operator avoids the raw-type unchecked warning)
        QLearningDiscreteDense<Box> dql = new QLearningDiscreteDense<>(mdp, CARTPOLE_NET, CARTPOLE_QL, manager);

        //train
        dql.train();

        //get the final policy
        DQNPolicy<Box> pol = dql.getPolicy();

        //serialize and save (serialization showcase, but not required)
        System.out.println("serialize and save ");
        pol.save(POLICY_PATH);
    }


    /**
     * Loads the previously saved policy and evaluates it on the given MDP,
     * printing per-episode rewards and the average over 1000 episodes.
     *
     * @param mdp the environment to evaluate on
     * @throws IOException if the serialized policy cannot be read
     */
    public static void loadCartpole(CarMDP<Box, Integer, DiscreteSpace> mdp) throws IOException {

        //load the previous agent
        DQNPolicy<Box> pol2 = DQNPolicy.load(POLICY_PATH);

        //evaluate the agent
        double rewards = 0;
        int size=1000;
        for (int i = 0; i < size; i++) {
            double reward = pol2.play(mdp);
            rewards += reward;
            System.out.println("Reward: " + reward);
        }

        System.out.println("average: " + rewards/size);
    }
}
