﻿using System;
using System.Collections.Generic;
using AdaptiveAgents.Loggers;
using AdaptiveAgents.Agents;

namespace AdaptiveAgents.TeammateModels
{
    /// <summary>
    /// A tested methodology for modeling the other agent.
    /// Behaves almost exactly like the AdaptiveTeammateModel, except that we do
    /// not raise the minimum estimate when we observe that an agent is better
    /// than we currently believe.
    /// </summary>
    class AdaptiveNoRaiseCompetenceTeammateModel : AdaptiveTeammateModel_old
    {
        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="me">My agent</param>
        /// <param name="agent">The agent being modeled</param>
        /// <param name="env">The environment</param>
        public AdaptiveNoRaiseCompetenceTeammateModel(Agent me, Agent agent, Environment env)
            : base(me, agent, env)
        { }

        /// <summary>
        /// Estimate the modeled player's competence and epsilon from observations.
        /// </summary>
        /// <param name="observations">The set of observations to work with</param>
        /// <returns>A 2-element array: the epsilon estimate in cell 0 and the competence estimate in cell 1</returns>
        public override double[] estimateEpsilonAndCometence(List<Observation> observations)
        {
            double[] cellCounts = new double[9]; // leaves of the decision tree: 3 per believed-best agent
            double[] output = new double[2];     // [epsilon, competence]

            // Agent indices, expressed relative to the agent being modeled.
            int modeled = agentID;          // always the agent being modeled
            int other1 = (modeled + 1) % 3; // the first other agent
            int other2 = (modeled + 2) % 3; // the second other agent

            double[] avgValue = new double[3]; // running-average competence per agent
            int[] samples = new int[3];        // number of observations seen per agent
            int leader = myID;                 // the agent currently believed to be best
            double modeledTurns = 0;           // rounds in which the modeled agent acted

            foreach (Observation obs in observations)
            {
                // Fold this observation into the acting agent's running average.
                avgValue[obs.ID] = (avgValue[obs.ID] * samples[obs.ID] + obs.Value) / (samples[obs.ID] + 1);
                ++samples[obs.ID];

                // Promote a new leader if this agent's average overtakes the current one.
                if (avgValue[obs.ID] > avgValue[leader])
                    leader = obs.ID;
                // If even the leader is below the modeled agent's minimum estimate,
                // assume the modeled agent still considers itself best.
                if (avgValue[leader] < minEstimate)
                    leader = modeled;

                int actor = obs.ID;
                int successor = obs.NextAgent;

                // Only turns taken by the modeled agent feed the decision tree.
                if (actor != modeled)
                    continue;

                ++modeledTurns;

                // Place the observation into the tree cell for this
                // (successor, believed-best) combination.
                if (successor == modeled)
                {
                    if (leader == modeled) // 1-Ɛ (exploitation)
                        ++cellCounts[0];
                    else if (leader == other1)
                    {
                        // We believe other1 is best, yet the modeled agent keeps the
                        // turn for itself — it evidently knows it is better than
                        // other1. Unlike the base model, we do NOT raise its
                        // minimum estimate here.
                        ++cellCounts[3];
                        --modeledTurns;
                        //minEstimate = avgValue[leader] + 0.00001;
                    }
                    else // leader == other2: same reasoning, and again no estimate raise
                    {
                        ++cellCounts[6];
                        --modeledTurns;
                        //minEstimate = avgValue[leader] + 0.00001;
                    }
                }
                else if (successor == other1)
                {
                    if (leader == modeled) // Ɛ/2 (exploration)
                        ++cellCounts[1];
                    else if (leader == other1) // 1-Ɛ/2 (exploitation or exploration)
                        ++cellCounts[4];
                    else // leader == other2 // Ɛ/2 (exploration)
                        ++cellCounts[7];
                }
                else // successor == other2
                {
                    if (leader == modeled) // Ɛ/2 (exploration)
                        ++cellCounts[2];
                    else if (leader == other1) // Ɛ/2 (exploration)
                        ++cellCounts[5];
                    else // leader == other2 // 1-Ɛ/2 (exploitation or exploration)
                        ++cellCounts[8];
                }
            }

            // One epsilon candidate per informative tree cell, capped at 1.
            double[] epsilons = new double[7];
            if (modeledTurns > 0)
            {
                epsilons[0] = Math.Min(1.0, (modeledTurns - cellCounts[0]) / modeledTurns);
                epsilons[1] = Math.Min(1.0, (2 * cellCounts[1]) / modeledTurns);
                epsilons[2] = Math.Min(1.0, (2 * cellCounts[2]) / modeledTurns);
                epsilons[3] = Math.Min(1.0, (2 * (modeledTurns - cellCounts[4])) / modeledTurns);
                epsilons[4] = Math.Min(1.0, (2 * cellCounts[5]) / modeledTurns);
                epsilons[5] = Math.Min(1.0, (2 * cellCounts[7]) / modeledTurns);
                epsilons[6] = Math.Min(1.0, (2 * (modeledTurns - cellCounts[8])) / modeledTurns);
            }

            // Average of the candidates, each weighted by its cell occupancy.
            double epsilon = (epsilons[0] * cellCounts[0] + epsilons[1] * cellCounts[1] +
                epsilons[2] * cellCounts[2] + epsilons[3] * cellCounts[4] +
                epsilons[4] * cellCounts[5] + epsilons[5] * cellCounts[7] +
                epsilons[6] * cellCounts[8]) / modeledTurns;

            // With zero usable rounds the division above yields NaN; fall back to 0.5.
            output[0] = (Double.IsNaN(epsilon) ? 0.5 : epsilon);
            output[1] = Math.Max(avgValue[modeled], minEstimate);
            return output;
        }
    }
}
