﻿using System;
using System.Collections.Generic;
using AdaptiveAgents.Loggers;
using AdaptiveAgents.Agents;
using AdaptiveAgents.Games;
using AdaptiveAgents.Distributions;


namespace AdaptiveAgents.Experiments
{
    /// <summary>
    /// Experiment for testing the rectangle (Blocks) distribution.
    /// Uses the legacy logger output method: each row is written cell by
    /// cell, and a newline is emitted only once the row is complete.
    /// </summary>
    public class RectangleTstExperiment : Experiment
    {
        /// <summary>
        /// Constructor. Configures the base experiment with 50 rounds,
        /// 3 agents and 1 adaptive agent.
        /// </summary>
        /// <param name="logger">a logger to log the experiment</param>
        public RectangleTstExperiment(Logger logger)
            : base(50, 3, 1)
        {
            this._logger = logger;
        }

        /// <summary>
        /// Run the main experiment: sweep the epsilon of the other agents
        /// (outer loop) against the epsilon of the adaptive agent (inner loop),
        /// average the utility over SETS sets of GAMES games each, and log the
        /// per-cell averages plus the best (average utility, epsilon) per row.
        /// </summary>
        public override void runExperiment()
        {
            //sweep configuration
            const double START = 0.0;
            const double JUMP = 2;
            const double END = 1;
            const int SETS = 1;   //distribution sets per epsilon pair
            const int GAMES = 1;  //games per set

            //create timestamped CSV loggers
            String stamp = DateTime.Now.ToString("yyyy.MM.dd-HH.mm.ss");
            Logger logUtil = new GraphLogger("AE-util-" + stamp + ".csv");
            Logger logEps = new GraphLogger("AE-eps-" + stamp + ".csv");

            //create a 2 dimensional set of distributions: SETS rows of 3 Blocks each
            List<List<IDistribution>> distributionsPack = new List<List<IDistribution>>(SETS);
            // BUG FIX: this loop previously ran "i <= SETS", creating SETS + 1
            // sets even though only indices [0, SETS) are ever read below.
            for (int i = 0; i < SETS; i++)
            {
                List<IDistribution> pack = new List<IDistribution>(3);
                distributionsPack.Add(pack);
                for (int j = 0; j < 3; j++)
                {
                    pack.Add(new Blocks());
                    pack[j].generateValue();
                }
            }

            //print the header row (one cell per epsilon value) to the loggers
            // BUG FIX: the header loop previously used "i < END" while the data
            // loops below use "<= END", so the header row had one column fewer
            // than the data rows.
            // NOTE(review): no WriteLine is issued after the header, so the
            // first data row may share the header's line — confirm against
            // GraphLogger's row semantics.
            logUtil.Write(MessageType.EpsilonGraph, ""); //empty corner cell
            for (double i = START; i <= END; i += JUMP)
            {
                logUtil.Write(MessageType.EpsilonGraph, "{0}", i);
                logEps.Write(MessageType.EpsilonGraph, "{0}", i);
            }

            //start running the experiment

            double utility = 0;
            double maxUtil = 0;    //best summed utility seen for the current eps (tracked, not logged)
            double maxEps = 0;     //adaptive epsilon that produced maxUtil
            double maxAvgUtil = 0; //best average utility for the current eps
            double maxAvgEps = 0;  //adaptive epsilon that produced maxAvgUtil

            //for each epsilon of the other 2 agents...
            for (double eps = START; eps <= END; eps += JUMP)
            {
                maxAvgUtil = 0; //reset the maximal average utility for this epsilon
                logUtil.Write(MessageType.EpsilonGraph, "{0:f2}", eps); //row label
                maxUtil = 0;
                System.Console.WriteLine("eps: " + eps); //BUG FIX: was misspelled "esp"
                //for each epsilon of the adaptive agent...
                for (double seps = START; seps <= END; seps += JUMP)
                {
                    System.Console.WriteLine("seps: " + seps);

                    double utilAvgSum = 0;

                    //for each set...
                    for (int set = 0; set < SETS; set++)
                    {
                        if (set % GAMES == 0)
                            System.Console.WriteLine("set: " + set);

                        //fresh environment for every set
                        Environment environment = new Environment();

                        //create a new population of players with different
                        //epsilons for the adaptive and the normal agents
                        List<Agent> agents = generatePlayers(NumAgents, environment, 1, seps, eps, null);

                        //assign each agent its competence distribution and register it
                        for (int i = 0; i < NumAgents; i++)
                        {
                            Agent agent = agents[i];
                            agent.Competence = (distributionsPack[set])[i];
                            environment.addAgent(agent);
                        }

                        //init the Teammate Model
                        foreach (Agent agent in environment.agents)
                        {
                            agent.initializeTeammateModels(environment.agents.Count);
                        }

                        //create the game and run GAMES games, summing the utility
                        Game game = GameFactory.create(Games.GameType.RectangleTest, environment, numRounds);
                        utilitySum = 0;
                        for (int count = 0; count < GAMES; count++)
                        {
                            utility = game.start(_logger);
                            utilitySum += utility;
                        }

                        //track the max utility and the adaptive epsilon that produced it
                        if (maxUtil < utilitySum)
                        {
                            maxUtil = utilitySum;
                            maxEps = seps;
                        }

                        utilAvgSum += utilitySum / GAMES; //accumulate the per-set average
                    }//end of set

                    //write the average utility over all sets for this (eps, seps) cell
                    logUtil.Write(MessageType.EpsilonGraph, "{0:f3}", utilAvgSum / SETS);
                    //remember the best average and the seps that achieved it
                    if (maxAvgUtil < (utilAvgSum / SETS))
                    {
                        maxAvgUtil = (utilAvgSum / SETS);
                        maxAvgEps = seps;
                    }
                }//end of seps
                logEps.Write(MessageType.EpsilonGraph, "{0:f3} /-> {1:f2}", maxAvgUtil, maxAvgEps);
                logUtil.WriteLine(MessageType.EpsilonGraph, ""); //finish this row
            }//end of eps
            logEps.Write(MessageType.HackMessage, "");
            logEps.Close();
            logUtil.Close();
        }

        /// <summary>
        /// Create adaptive and normal players, each group with its own epsilon.
        /// Adaptive agents are created first (ids 0..numAdaptive-1), normal
        /// agents fill the remaining ids.
        /// </summary>
        /// <param name="numOfAgents">number of total agents</param>
        /// <param name="env">environment</param>
        /// <param name="numAdaptive">number of adaptive agents</param>
        /// <param name="adaptiveEpsilon">epsilon value for the adaptive agents</param>
        /// <param name="normalEpsilon">epsilon for the normal agents</param>
        /// <param name="logger">logger to log the data (may be null)</param>
        /// <returns>list of new agents created</returns>
        protected override List<Agent> generatePlayers(int numOfAgents, Environment env, int numAdaptive, double adaptiveEpsilon, double normalEpsilon, Logger logger)
        {
            //set a list of agents
            List<Agent> retList = new List<Agent>(numOfAgents);

            AgentDirector director = new AgentDirector();

            // NOTE(review): both groups are built with NormalBuilder even though
            // this first group is documented as "adaptive" — confirm whether an
            // adaptive builder should be used here instead.
            //create adaptive agents
            for (int i = 0; i < numAdaptive; ++i)
            {
                director.Builder = new Agents.NormalBuilder();
                // BUG FIX: previously every agent in this loop was constructed
                // with id 0; use i so ids stay unique when numAdaptive > 1.
                director.construct(i, env, adaptiveEpsilon, logger);
                retList.Add(director.getAgent());
            }

            //create normal agents
            // BUG FIX: bound on the numOfAgents parameter rather than the
            // NumAgents field, so the method honors its own argument.
            for (int i = numAdaptive; i < numOfAgents; ++i)
            {
                director.Builder = new Agents.NormalBuilder();
                director.construct(i, env, normalEpsilon, logger);
                retList.Add(director.getAgent());
            }
            return retList;
        }
    }
}
