﻿using System;
using System.Collections.Generic;
using AdaptiveAgents.Distributions;
using AdaptiveAgents.Agents;
using AdaptiveAgents.Loggers;
using AdaptiveAgents.Games;
using System.Data;

namespace AdaptiveAgents.Experiments
{
    /// <summary>
    /// An experiment to test whether an MDPDiscover agent works better than an
    /// MDPOptimal or a Greedy agent.
    /// The experiment writes 3 XML files:
    /// 1. games using the MDPDiscover agent
    /// 2. games using the MDPOptimal agent
    /// 3. games using the Greedy agent
    /// Using the data of all the games we can determine which agent is better.
    /// </summary>
    class MDPExperiment : Experiment
    {
        //shared "random" game parameters, drawn once per game so that all three
        //agent types are compared on identical data
        double competence;   //competence (mean) for agent 0
        double competence1;  //competence (mean) for agent 1
        double competence2;  //competence (mean) for agent 2

        double eps;          //epsilon for agent 0
        double eps1;         //epsilon for agent 1
        double eps2;         //epsilon for agent 2

        /// <summary>
        /// Constructor - delegates to the base Experiment with (50, 3, 1);
        /// the meaning of these values is defined by the base class.
        /// </summary>
        public MDPExperiment()
            : base(50, 3, 1) {}

        //data members that store the experiment data
        private DataTable dtMDPDiscover; //for runs with the MDPDiscover agent
        private DataTable dtMDPOptimal;  //for runs with the MDPOptimal agent
        private DataTable dtGreedy;      //for runs with the Greedy agent

        //enum for the different agent types
        private enum AgentType { Greedy, MDPDiscovery, MDPOptimal };

        //the agent type generatePlayers will build next; it cycles
        //Greedy -> MDPDiscovery -> MDPOptimal on every call (see generatePlayers)
        private AgentType agentType = AgentType.Greedy; //init to Greedy!

        /// <summary>
        /// Build a DataTable with the standard experiment schema: game number,
        /// the three competences, the three epsilons, a total-utility column,
        /// and one per-round utility column ("utility 0" .. "utility rounds-1").
        /// </summary>
        /// <param name="tableName">name of the DataTable</param>
        /// <param name="utilityColumn">name of the total-utility column</param>
        /// <param name="rounds">number of rounds in each game</param>
        /// <returns>the prepared DataTable</returns>
        private static DataTable createExperimentTable(string tableName, string utilityColumn, int rounds)
        {
            DataTable dt = new DataTable(tableName);
            dt.Columns.Add("GameNum", typeof(double));
            dt.Columns.Add("Competence0", typeof(double));
            dt.Columns.Add("Competence1", typeof(double));
            dt.Columns.Add("Competence2", typeof(double));
            dt.Columns.Add("eps0", typeof(double));
            dt.Columns.Add("eps1", typeof(double));
            dt.Columns.Add("eps2", typeof(double));
            dt.Columns.Add(utilityColumn, typeof(double));

            //one utility column per round; the old code added these through
            //Columns.Add(string), which silently produces string-typed columns -
            //they are now explicitly typed double like all the others
            for (int i = 0; i < rounds; ++i)
                dt.Columns.Add("utility " + i, typeof(double));

            return dt;
        }

        /// <summary>
        /// Copy the per-game parameters (game number, epsilons, competences)
        /// from the data members into a freshly created row.
        /// </summary>
        /// <param name="row">a new row of one of the experiment tables</param>
        /// <param name="gameNum">index of the current game</param>
        private void fillGameRow(DataRow row, int gameNum)
        {
            row["GameNum"] = gameNum;

            row["eps0"] = eps;
            row["eps1"] = eps1;
            row["eps2"] = eps2;

            row["Competence0"] = competence;
            row["Competence1"] = competence1;
            row["Competence2"] = competence2;
        }

        /// <summary>
        /// Run one MDP game in a fresh environment using the current agentType
        /// (generatePlayers advances agentType to the next type as a side effect).
        /// The game writes its per-round data into <paramref name="row"/>.
        /// </summary>
        /// <param name="rounds">number of rounds to play</param>
        /// <param name="row">row the game should record its per-round data in</param>
        /// <returns>the total utility of the game</returns>
        private double runSingleGame(int rounds, DataRow row)
        {
            // first let's set the environment
            Environment environment = new Environment();

            //create the population: one experiment agent (built according to
            //agentType) and two normal agents, all with the shared epsilons
            List<Agent> agents = generatePlayers(environment, eps, eps1, eps2, _logger);
            for (int j = 0; j < NumAgents; j++)
                environment.addAgent(agents[j]);

            //create a game
            MDPGame_old game = (MDPGame_old)GameFactory.create(Games.GameType.MDP, environment, rounds);

            //set the agents with their competences
            environment.agents[0].Competence = new Blocks(competence);
            environment.agents[1].Competence = new Blocks(competence1);
            environment.agents[2].Competence = new Blocks(competence2);

            //hand the game the row it needs to write the game data to
            game.Dr1 = row;

            //run the game and get back the utility of the game
            return game.start(_logger);
        }

        /// <summary>
        /// Run the experiment: for each game, draw one shared set of "random"
        /// parameters, then play the same game three times (Greedy, MDPDiscovery,
        /// MDPOptimal) and record the results in three XML files.
        /// </summary>
        public override void runExperiment()
        {
            const int GAMES = 5000;  //number of games to run
            const int ROUNDS = 200;  //number of rounds in each game

            //create a DataTable per agent type (original table names kept)
            dtMDPDiscover = createExperimentTable("MDPExperiment", "utility Discover", ROUNDS);
            dtGreedy = createExperimentTable("NormalExperiment", "utility Greedy", ROUNDS);
            dtMDPOptimal = createExperimentTable("MDPOptimalExperiment", "utility Optimal", ROUNDS);

            //run games
            for (int i = 0; i < GAMES; ++i)
            {
                //start every game with a greedy agent; each generatePlayers call
                //advances the type, so the three runs below cycle through all types
                agentType = AgentType.Greedy;

                //write to console to see progress
                if (i % 100 == 0)
                    System.Console.WriteLine(i);

                //set "random" epsilons for the agents (same draw order as before)
                eps = Generator.getNextDouble();
                eps1 = Generator.getNextDouble() * 0.1 + 0.9;
                eps2 = Generator.getNextDouble() * 0.1 + 0.9;

                //set a "random" competence (mean) for each agent
                competence = Generator.getNextDouble() * 0.2 + 0.8;
                competence1 = Generator.getNextDouble() * 0.1 + 0.9;
                competence2 = Generator.getNextDouble() * 0.1;

                //one row per agent type, all sharing the same game parameters
                DataRow rowGreedy = dtGreedy.NewRow();
                DataRow rowDiscover = dtMDPDiscover.NewRow();
                DataRow rowOptimal = dtMDPOptimal.NewRow();
                fillGameRow(rowGreedy, i);
                fillGameRow(rowDiscover, i);
                fillGameRow(rowOptimal, i);

                //run the three games in the order the agentType cycle expects:
                //Greedy, then MDPDiscovery, then MDPOptimal
                rowGreedy["utility Greedy"] = runSingleGame(ROUNDS, rowGreedy);
                dtGreedy.Rows.Add(rowGreedy);

                rowDiscover["utility Discover"] = runSingleGame(ROUNDS, rowDiscover);
                dtMDPDiscover.Rows.Add(rowDiscover);

                rowOptimal["utility Optimal"] = runSingleGame(ROUNDS, rowOptimal);
                dtMDPOptimal.Rows.Add(rowOptimal);
            }

            //create file names and write to the files
            String stamp = DateTime.Now.ToString("yyyy.MM.dd-HH.mm.ss");
            dtGreedy.WriteXml("GreedyExperiment_" + stamp + ".xml");
            dtMDPDiscover.WriteXml("MDPDiscoverExperiment_" + stamp + ".xml");
            dtMDPOptimal.WriteXml("MDPOptimalExperiment_" + stamp + ".xml");
        }

        /// <summary>
        /// Generate the agents for a game: the first agent is built according to
        /// the current agentType - which is then advanced to the next type in the
        /// Greedy -> MDPDiscovery -> MDPOptimal cycle - and the 2nd and 3rd agents
        /// are normal agents.
        /// </summary>
        /// <param name="env">the Environment of the game</param>
        /// <param name="epsilon0">epsilon for agent 0</param>
        /// <param name="epsilon1">epsilon for agent 1</param>
        /// <param name="epsilon2">epsilon for agent 2</param>
        /// <param name="logger">Logger for logging the data</param>
        /// <returns>a list of agents for the game</returns>
        protected List<Agent> generatePlayers(Environment env, double epsilon0, double epsilon1, double epsilon2, Logger logger)
        {
            int numOfAgents = 3;
            //create agents list
            List<Agent> retList = new List<Agent>(numOfAgents);
            //create director
            AgentDirector director = new AgentDirector();

            switch (agentType)
            {
                case AgentType.Greedy:
                    director.Builder = new Agents.GreedyBuilder(); //create builder
                    agentType = AgentType.MDPDiscovery; //set agent type for next time
                    break;
                case AgentType.MDPDiscovery:
                    director.Builder = new Agents.MDPDiscoverBuilder(); //create builder
                    agentType = AgentType.MDPOptimal; //set agent type for next time
                    break;
                case AgentType.MDPOptimal:
                    //the optimal agent is told the other agents' competences and
                    //epsilons up front
                    List<double> compList = new List<double> { competence1, competence2 };
                    List<double> epsList = new List<double> { eps1, eps2 };

                    director.Builder = new Agents.MDPOptimalBuilder(compList, epsList); //create builder
                    agentType = AgentType.Greedy; //set agent type for next time
                    break;
            }

            //construct the first agent according to the director
            director.construct(0, env, epsilon0, logger);
            Agent ag = director.getAgent();
            ag.ChangeStrategyPoint = 10;
            retList.Add(ag);

            //construct the 2nd and 3rd agents as normal agents
            director.Builder = new Agents.NormalBuilder();
            director.construct(1, env, epsilon1, logger);
            retList.Add(director.getAgent());

            director.Builder = new Agents.NormalBuilder();
            director.construct(2, env, epsilon2, logger);
            retList.Add(director.getAgent());

            return retList;
        }
    }
}
