﻿using System;
using System.Collections.Generic;
using System.Text;
using AdaptiveAgents.Agents;

namespace AdaptiveAgents.TeammateModels
{
    class AdaptiveTeammateModel : TeammateModel
    {
        //the minimum possible estimation of the modeled agent's competence;
        //raised during estimation when the modeled agent's self-passes reveal
        //that he believes he is better than the observed "best" agent
        public double minEstimate = 0;

        //when true, a self-pass by the modeled agent (while another agent looks
        //best) raises minEstimate; historical spelling kept - public API
        public bool UpdateCompetance { get; set; }

        //number of agents in the environment, captured at construction time
        public int NumberOfAgents { get; protected set; }

        /// <summary>
        /// Constructor
        /// </summary>
        /// <param name="me">My agent</param>
        /// <param name="agent">The agent I'm modeling</param>
        /// <param name="env">The environment</param>
        /// <param name="updateCompetance">Whether self-passes may raise the minimal competence estimate</param>
        public AdaptiveTeammateModel(Agent me, Agent agent, Environment env, bool updateCompetance)
            : base(me, agent, env)
        {
            NumberOfAgents = env.agents.Count;
            UpdateCompetance = updateCompetance;
        }

        /// <summary>
        /// Estimate the other player's competence and epsilon (exploration rate).
        /// Rounds in which the modeled agent acted are binned by
        /// (who looked best, who received the pass); under an epsilon-greedy
        /// passing model each bin's frequency implies an epsilon estimate, and
        /// the final epsilon is the frequency-weighted average over the bins.
        /// </summary>
        /// <param name="observations">A set of observations to work with</param>
        /// <returns>A 2 size array with the epsilon estimate in cell 0 and the competence estimate in cell 1</returns>
        public virtual double[] estimateEpsilonAndCometence(List<Observation> observations)
        {
            //counts of (best, passed-to) pairs, indexed relative to the modeled
            //agent so that index 0 is always him (see GetMyIndex)
            double[,] testArr = new double[NumberOfAgents, NumberOfAgents];

            double[] result = new double[2]; //the result array

            // agent 0 is always agentID (the agent being modeled)
            int[] agents = new int[NumberOfAgents];
            for (int i = 0; i < NumberOfAgents; i++)
            {
                agents[i] = (agentID + i) % NumberOfAgents;
            }

            double[] estimatedCompetence = new double[NumberOfAgents]; //estimated competence of each agent
            int[] count = new int[NumberOfAgents]; //number of times the agent played
            int best = myID; //the best player seen so far
            double rounds = 0; //number of informative rounds (modeled agent was active)
            int activePlayer, nextActivePlayer;

            //for each observation
            foreach (Observation obs in observations)
            {
                //fold this observation into the agent's running average competence
                estimatedCompetence[obs.ID] = (estimatedCompetence[obs.ID] * count[obs.ID] + obs.Value) / (count[obs.ID] + 1);
                ++count[obs.ID];

                //check if we have a new "best"
                if (estimatedCompetence[obs.ID] > estimatedCompetence[best])
                    best = obs.ID;
                //if even the best is below the modeled agent's known minimum,
                //the modeled agent himself is considered best
                if (estimatedCompetence[best] < minEstimate)
                    best = agents[0];

                activePlayer = obs.ID;
                nextActivePlayer = obs.NextAgent;

                //only rounds where the modeled agent acted (and a pass
                //actually happened) carry information about his policy
                if (nextActivePlayer != -1 && activePlayer == agents[0])
                {
                    ++rounds;
                    //bin the pass by (who looked best, who received the pass)
                    testArr[GetMyIndex(best), GetMyIndex(nextActivePlayer)]++;

                    if (best != agents[0] && nextActivePlayer == agentID)
                    {
                        //another agent looks best, but the modeled agent passed
                        //to himself: he evidently knows he is better than our
                        //current "best", so raise his minimal estimate and
                        //discard the round (it carries no epsilon information)
                        if (UpdateCompetance)
                        {
                            --rounds;
                            minEstimate = estimatedCompetence[best] + 0.00001;
                        }
                    }
                }
            }

            //derive a per-bin epsilon estimate from the observed frequencies
            double[,] epsilons = new double[NumberOfAgents, NumberOfAgents];
            double epsilon = 0.5; //uninformed prior when there is no data
            if (rounds > 0)
            {
                for (int i = 0; i < NumberOfAgents; i++)
                {
                    for (int j = 0; j < NumberOfAgents; j++)
                    {
                        if (j == 0)
                        {
                            if (i == 0)
                            {
                                //self-pass while the modeled agent looks best:
                                // 1-epsilon = counter/rounds
                                // epsilon = 1 - counter/rounds = (rounds-counter)/rounds
                                epsilons[i, j] = (rounds - testArr[i, j]) / rounds;
                            }
                            //self-passes while someone else looked best (i != 0)
                            //were discounted from "rounds" above - skip them
                            continue;
                        }

                        double counter = testArr[i, j];
                        if (i == j) //passed to the agent who looked best
                        {
                            // 1-epsilon + epsilon/(N-1) = counter/rounds
                            // epsilon = ((rounds-counter) * (N-1)) / (rounds * (N-2))
                            //NOTE: degenerate when NumberOfAgents == 2 (zero
                            //denominator); the resulting NaN/Infinity is
                            //neutralized by the clamp and NaN guard below
                            epsilons[i, j] = ((rounds - counter) * (NumberOfAgents - 1)) / (rounds * (NumberOfAgents - 2));
                        }
                        else //passed to a non-best agent: pure exploration
                        {
                            // epsilon/(N-1) = counter/rounds
                            // epsilon = ( counter / rounds ) * (N-1)
                            epsilons[i, j] = (counter / rounds) * (NumberOfAgents - 1);
                        }
                    }
                }

                //clamp so the max epsilon will be 1
                for (int i = 0; i < NumberOfAgents; i++)
                {
                    for (int j = 0; j < NumberOfAgents; j++)
                    {
                        if (epsilons[i, j] > 1)
                        {
                            epsilons[i, j] = 1;
                        }
                    }
                }

                //weighted average of the per-bin epsilons, weighted by how
                //often each bin was observed
                double sum = 0;
                for (int i = 0; i < NumberOfAgents; i++)
                {
                    for (int j = 0; j < NumberOfAgents; j++)
                    {
                        sum += epsilons[i, j] * testArr[i, j];
                    }
                }
                epsilon = sum / rounds;
            }

            //prepare the return data; fall back to the uninformed prior if the
            //estimate degenerated to NaN (e.g. the NumberOfAgents == 2 zero
            //denominator above) - the legacy implementation had this guard too
            result[0] = Double.IsNaN(epsilon) ? 0.5 : epsilon;
            result[1] = Math.Max(estimatedCompetence[agents[0]], minEstimate);

            return result;
        }

        //debugging leftover from the legacy implementation; kept public so any
        //external references keep compiling
        public bool stop = false;

        /// <summary>
        /// Maps a real agent index to an index relative to the modeled agent,
        /// so that the modeled agent is always index 0.
        /// </summary>
        private int GetMyIndex(int realIndex)
        {
            //adding NumberOfAgents before % keeps the result non-negative
            //(C#'s remainder takes the sign of the dividend)
            return (realIndex + NumberOfAgents - agentID) % NumberOfAgents;
        }
    }
}
