﻿using System;
using System.Collections.Generic;
using System.Text;
using AdaptiveAgents.Agents;
using AdaptiveAgents.Loggers;

namespace AdaptiveAgents.TeammateModels
{
    /// <summary>
    /// A class that models another agent's ability: it estimates the modeled
    /// agent's competence and its exploration rate (epsilon) from observations.
    /// </summary>
    public class TeammateModel
    {

        protected int agentID; // the ID of the agent being modeled
        private Agent agent;   // the agent being modeled
        protected int myID;    // the ID of the agent doing the modeling
        private Agent me;      // the agent doing the modeling
        private Environment environment; // the shared environment (currently only stored)
        private int memoryPerAgent; // memory (number of rounds) of the agent doing the modeling
        private int _explorations;  // count of observed moves that deviated from the predicted best performer (see updateEpsilon)
        private bool _isExploration = false;

        // The minimum possible estimate of the modeled agent's competence.
        // estimateEpsilonAndCometence1 raises it whenever the modeled agent keeps
        // the turn for itself while we believed another agent was better.
        public double minEstimate = 0;

        /// <summary>
        /// Constructor
        /// </summary>
        /// <param name="me">My agent (the one doing the modeling)</param>
        /// <param name="agent">The agent I want to model</param>
        /// <param name="env">The environment</param>
        public TeammateModel(Agent me, Agent agent, Environment env)
        {
            this.agent = agent;
            this.agentID = agent.ID;
            this.me = me;
            this.myID = me.ID;
            this.environment = env;
            this._explorations = 0;
            memoryPerAgent = me.Memory;
        }

        /// <summary>
        /// Whether the modeled agent's latest move is treated as an exploration move.
        /// </summary>
        public bool isExploration
        {
            get { return _isExploration; }
            set { _isExploration = value; }
        }

        /// <summary>
        /// Get the ID of the agent being modeled
        /// </summary>
        /// <returns>The modeled agent's ID</returns>
        public int getID()
        {
            return agentID;
        }

        /// <summary>
        /// Estimate the competence of the agent we model as the average of the
        /// observed values within the modeling agent's memory window.
        /// </summary>
        /// <returns>The average observed value, or the 0.5 prior when there are no observations yet</returns>
        public double EstimateCompetence()
        {
            // Realistic prior used when we have no observations yet.
            // (Removed a dead store: the old code assigned Double.MaxValue as an
            // "optimistic initialization" and immediately overwrote it with .5.)
            double comp = 0.5;

            // NOTE: It would probably be better to store these observations in
            // this TeammateModel class as they come in rather than having
            // to pull them out of the full observation set every time...

            // Get the observations of the agent we model.
            ObservationsSet agentObservations = me.getObservationByID(agentID, memoryPerAgent);

            AdaptiveAgents.logger.WriteLine(MessageType.ObservationsSoFar, agentObservations.getSize());


            if (agentObservations.getSize() > 0)
            {
                // Average of the values the modeled agent achieved.
                comp = 0;
                foreach (Observation observation in agentObservations.observations)
                {
                    comp += observation.Value;
                }
                comp /= agentObservations.getSize();
            }

            AdaptiveAgents.logger.WriteLine(MessageType.EstimatedCompetence, agent.ID, comp);
            return comp;
        }

        /// <summary>
        /// Get a list of best players, as predicted by the modeling agent.
        /// </summary>
        /// <returns>a list of the best players' IDs</returns>
        public List<int> getBest()
        {
            return me.predictNextPerformer();
        }



        /// <summary>
        /// Estimating agent's epsilon. Code is still in progress and untested.
        /// Counts a deviation whenever the selected agent was not among the
        /// predicted best performers.
        /// </summary>
        /// <param name="selectedAgent">ID of the agent whose epsilon is updated</param>
        /// <returns>True - if agent is acting as expected</returns>
        public bool updateEpsilon(int selectedAgent)
        {
            //UNDONE - code is still in progress and untested
            List<int> expectedSelections = me.predictNextPerformer();

            if (expectedSelections.Contains(selectedAgent))
            {
                //the agent acted as expected
                return true;
            }
            else
            {
                _explorations++;
                // NOTE: Should start storing numActions in the teammate model
                /*int actions = environment.agents[agentID].NumActions;
                double epsilon = (double)_explorations / (double)actions;

                AdaptiveAgents.logger.WriteLine(MessageType.EstimatedEpsilon,
                        agentID, epsilon, _explorations, actions);*/
                return false;
            }
        }

        /// <summary>
        /// Print data to Loggers
        /// </summary>
        /// <param name="message">Message type to print under</param>
        public void print(MessageType message)
        {
            AdaptiveAgents.logger.WriteLine(message, "Model of agent {0}", agentID);
        }

        /// <summary>
        /// Estimate the other player's competence and epsilon.
        /// Assumes a 3-agent game (agent IDs are taken modulo 3).
        /// </summary>
        /// <param name="observations">A set of observation to work with</param>
        /// <returns>A 2 size array with the epsilon estimate in cell 0 and the competence estimate in cell 1</returns>
        public virtual double[] estimateEpsilonAndCometence1(List<Observation> observations)
        {
            double[] testArr = new double[9]; //an array that represents the leaves of the decision tree below
            double[] result = new double[2]; //the result array

            int agent0; //always the agent being modeled
            int agent1; //the first other agent
            int agent2; //the second other agent

            agent0 = agentID;
            agent1 = (agent0 + 1) % 3;
            agent2 = (agent0 + 2) % 3;

            double[] estimated = new double[3]; //estimated competence of each agent
            int[] count = new int[3]; //number of times each agent played
            int best = myID; //the best player so far (start from ourselves)
            double rounds = 0; //usable rounds in which the modeled agent acted
            int activePlayer, nextActivePlayer;

            //for each observation
            foreach (Observation obs in observations)
            {
                //fold this observation into the agent's running average competence
                estimated[obs.ID] = (estimated[obs.ID] * count[obs.ID] + obs.Value) / (count[obs.ID] + 1);
                ++count[obs.ID];

                //check if we have a new "best"
                if (estimated[obs.ID] > estimated[best])
                    best = obs.ID;
                //check if the best agent is still less than the minimum for the agent we are modeling
                if (estimated[best] < minEstimate)
                    best = agent0;

                activePlayer = obs.ID;
                nextActivePlayer = obs.NextAgent;

                //place the observation in the suitable leaf of the tree
                if (activePlayer == agent0)
                {
                    ++rounds;
                    if (best == agent0)
                    {
                        if (nextActivePlayer == agent0) // 1-Ɛ (exploitation)
                            ++testArr[0];
                        else if (nextActivePlayer == agent1) // Ɛ/2 (exploration)
                            ++testArr[1];
                        else //nextActivePlayer=agent2 // Ɛ/2 (exploration)
                            ++testArr[2];
                    }
                    else if (best == agent1)
                    {
                        if (nextActivePlayer == agent0)
                        {
                            //agent 1 is the one we think is best, but agent 0 (the one we model) is passing to himself
                            //that means that agent 0 knows he is better than agent 1,
                            //so we update his minimal estimate
                            ++testArr[3];
                            --rounds;
                            minEstimate = estimated[best] + 0.00001;
                        }
                        else if (nextActivePlayer == agent1) // 1-Ɛ/2 (exploitation or exploration)
                            ++testArr[4];
                        else //nextActivePlayer=agent2 // Ɛ/2 (exploration)
                            ++testArr[5];
                    }
                    else //best = agent2
                    {
                        if (nextActivePlayer == agent0)
                        {
                            //agent 2 is the one we think is best, but agent 0 (the one we model) is passing to himself
                            //that means that agent 0 knows he is better than agent 2,
                            //so we update his minimal estimate
                            ++testArr[6];
                            --rounds;
                            minEstimate = estimated[best] + 0.00001;
                        }
                        else if (nextActivePlayer == agent1) // Ɛ/2 (exploration)
                            ++testArr[7];
                        else //nextActivePlayer=agent2 // 1-Ɛ/2 (exploitation or exploration)
                            ++testArr[8];
                    }
                }
            }

            //calculate the epsilon value implied by each leaf of the tree
            double[] epsilons = new double[7];
            if (rounds > 0)
            {
                epsilons[0] = (rounds - testArr[0]) / rounds;
                epsilons[1] = (2 * testArr[1]) / rounds;
                epsilons[2] = (2 * testArr[2]) / rounds;
                epsilons[3] = (2 * (rounds - testArr[4])) / rounds;
                epsilons[4] = (2 * testArr[5]) / rounds;
                epsilons[5] = (2 * testArr[7]) / rounds;
                epsilons[6] = (2 * (rounds - testArr[8])) / rounds;
            }

            //normalize so the max epsilon will be 1
            for (int i = 0; i < 7; ++i)
            {
                if (epsilons[i] > 1)
                    epsilons[i] = 1;
            }

            //calculate the weighted average of the per-leaf epsilons; when no
            //usable rounds were observed, skip the division and keep the 0.5
            //prior (the original code divided by zero here and relied on the
            //resulting NaN being caught by the IsNaN fallback below)
            double epsilon = 0.5;
            if (rounds > 0)
            {
                epsilon = (epsilons[0] * testArr[0] + epsilons[1] * testArr[1] +
                    epsilons[2] * testArr[2] + epsilons[3] * testArr[4] +
                    epsilons[4] * testArr[5] + epsilons[5] * testArr[7] +
                    epsilons[6] * testArr[8]) / rounds;
            }

            //prepare the return data
            result[0] = (Double.IsNaN(epsilon) ? 0.5 : epsilon);
            result[1] = Math.Max(estimated[agent0], minEstimate);
            return result;
        }

        // Note: Need to check the following before usage - has not been tested!
        ///// <summary>
        ///// Estimate the other player competence and epsilo
        ///// </summary>
        ///// <param name="observations">A set of observation to work with</param>
        ///// <returns>A 2 size array with the epsilon estimate in cell 0 and the competence estimate in cell 1</returns>
        //public virtual double[] estimateEpsilonAndCometenceAlternative(List<Observation> observations)
        //{
        //    double[] testArr = new double[9]; //an array that represents the levees of the tree
        //    double[] result = new double[2]; //the result array

        //    int agent0; //always the agent being modeled
        //    int agent1; //the first other agent
        //    int agent2; //the second other agent

        //    agent0 = agentID;
        //    agent1 = (agent0 + 1) % 3;
        //    agent2 = (agent0 + 2) % 3;

        //    double[] estimated = new double[3] { 0.0, 0.0, 0.0 }; //estimated competence of each agent
        //    int[] count = new int[3] { 0, 0, 0 }; //number of times the agent played
        //    int best = myID; //the best player
        //    double rounds = 0;
        //    int nextActivePlayer;
        //    double estimate = 0.0;
        //    int countRightMoves = 0;

        //    //for each observation
        //    foreach (Observation obs in observations)
        //    {
        //        rounds++;
        //        count[obs.ID]++;
        //        nextActivePlayer = obs.NextAgent;
        //        estimate = estimated[obs.ID] * (count[obs.ID] - 1) + obs.Value;
        //        estimated[obs.ID] = estimate / count[obs.ID];
        //        if (obs.ID != agent0 || obs.NextAgent == -1)
        //        {
        //            continue;
        //        }
        //        if (estimated[nextActivePlayer] > estimated[(nextActivePlayer + 1) % 3]
        //          && estimated[nextActivePlayer] > estimated[(nextActivePlayer + 2) % 3])
        //        {
        //            countRightMoves++;
        //        }
        //    }
        //    result[0] = (countRightMoves == 0) ? 1 : 1 - ((double)countRightMoves / count[agent0]);
        //    result[1] = estimated[agent0];

        //    return result;
        //}
    }
}
