﻿using System;
using System.Collections.Generic;
using AdaptiveAgents.Agents;
using AdaptiveAgents.Loggers;
using AdaptiveAgents.Games;

namespace AdaptiveAgents.TeammateModels
{
    /// <summary>
    /// A tested methodology for modeling the other agent.
    /// A pass to a different agent than the one passed to before counts as exploration;
    /// the agent's epsilon is estimated as the fraction of its plays that were exploration.
    /// The competence of the agent is calculated the same as in AdaptiveTeammateModel.
    /// </summary>
    class AdaptiveRaiseCompetenceObserveEpsilonTeammateModel : AdaptiveTeammateModel_old
    {
        /// <summary>
        /// Constructor
        /// </summary>
        /// <param name="me">My agent</param>
        /// <param name="agent">The agent I'm modeling</param>
        /// <param name="env">The environment</param>
        public AdaptiveRaiseCompetenceObserveEpsilonTeammateModel(Agent me, Agent agent, Environment env)
            : base(me, agent, env)
        { }

        /// <summary>
        /// Estimate the other player's competence and epsilon.
        /// (Method name kept as-is — it overrides the base class; the "Cometence" typo
        /// originates there and renaming would break the override contract.)
        /// </summary>
        /// <param name="observations">A set of observations to work with</param>
        /// <returns>A 2-element array with the epsilon estimate in cell 0 and the competence estimate in cell 1</returns>
        public override double[] estimateEpsilonAndCometence(List<Observation> observations)
        {
            double[] result = new double[2]; //the result array

            int agent0 = agentID; //always the agent being modeled

            double[] estimated = new double[3]; //running average competence of each agent
            int[] count = new int[3]; //number of times each agent played
            int best = myID; //the best-looking player so far

            //Pass 1: maintain per-agent competence averages, and raise minEstimate whenever
            //the modeled agent keeps the ball although another agent currently looks best —
            //that implies the modeled agent knows it is better than our current "best".
            foreach (Observation obs in observations)
            {
                //incremental average: fold this observation into the agent's running mean
                estimated[obs.ID] = (estimated[obs.ID] * count[obs.ID] + obs.Value) / (count[obs.ID] + 1);
                ++count[obs.ID];

                //check if we have a new "best"
                if (estimated[obs.ID] > estimated[best])
                    best = obs.ID;
                //if even the best is still below the modeled agent's minimum,
                //the modeled agent remains the best candidate
                if (estimated[best] < minEstimate)
                    best = agent0;

                //the modeled agent passed to itself while we thought another agent was best:
                //it knows it is better than that agent, so bump its minimal estimate just above
                if (obs.ID == agent0 && best != agent0 && obs.NextAgent == agent0)
                    minEstimate = estimated[best] + 0.00001;
            }

            //Pass 2: estimate epsilon by observation — passing to a different target
            //than the previous pass counts as exploration
            int epsCounter = 0;   //exploration counter
            int compCounter = 0;  //number of plays by the modeled agent
            int lastPass = -1;    //target of the modeled agent's previous pass (-1 = none yet)

            foreach (Observation obs in observations)
            {
                if (obs.ID == this.agentID)
                {
                    ++compCounter; //count number of plays by the agent

                    //the first pass only establishes a baseline;
                    //afterwards, a pass to a new target is exploration
                    if (lastPass != -1 && obs.NextAgent != lastPass)
                        ++epsCounter;

                    lastPass = obs.NextAgent;
                }
            }

            //Guard against division by zero: with no plays by the modeled agent the
            //original expression produced NaN; report epsilon 0 instead.
            result[0] = compCounter > 0 ? (epsCounter * 1.0) / compCounter : 0.0;
            result[1] = Math.Max(estimated[agent0], minEstimate);
            return result;
        }
    }
}
