﻿using System;
using System.Collections.Generic;
using System.Text;
using AdaptiveAgents.TeammateModels;
using AdaptiveAgents.Formulas;
using AdaptiveAgents.Loggers;

namespace AdaptiveAgents.Agents.Choosers
{
    /// <summary>
    /// Chooses next player according to MDP
    /// </summary>
    /// <summary>
    /// Chooses the next player according to an MDP: after an initial random
    /// exploration phase, estimates each teammate's epsilon and competence
    /// from the observation history and picks the agent with the highest
    /// MDP value.
    /// </summary>
    class MDPDiscoverNextPlayerChooser : NextPlayerChooser
    {
        // MDP solver sized to the current number of agents.
        protected NAgentsTool tool;

        public MDPDiscoverNextPlayerChooser(Agent agent)
            : base(agent)
        {
            tool = new NAgentsTool(NumberOfAgents);
            agent.NextPlayerChooser = this;
        }

        /// <summary>
        /// Returns the index of the largest value in <paramref name="results"/>.
        /// Ties are resolved in favor of the earliest index.
        /// </summary>
        /// <param name="results">Non-empty list of MDP values, one per agent.</param>
        /// <returns>Index of the maximum value.</returns>
        /// <exception cref="ArgumentException">Thrown when <paramref name="results"/> is null or empty.</exception>
        protected int chooseMaxValue(List<double> results)
        {
            if (results == null || results.Count == 0)
            {
                throw new ArgumentException("results must contain at least one value", "results");
            }

            // Seed the running maximum with the first element rather than 0:
            // the old `max = 0` seed always returned index 0 whenever every
            // value was negative, regardless of the true maximum.
            int maxIndex = 0;
            double max = results[0];
            for (int i = 1; i < results.Count; i++)
            {
                if (results[i] > max)
                {
                    max = results[i];
                    maxIndex = i;
                }
            }

            return maxIndex;
        }

        /// <summary>
        /// Chooses the next performer according to the MDP. While fewer than
        /// <c>ChangeStrategyPoint</c> observations have been collected, a
        /// uniformly random agent is returned (learning phase); afterwards the
        /// agent with the highest solved MDP value is selected.
        /// </summary>
        /// <param name="maxToConsider">Unused by this chooser; kept for the base-class contract.</param>
        /// <returns>Index of the next performer.</returns>
        public override int choose(int maxToConsider)
        {
            NumberOfAgents = _me.Environment.agents.Count;

            // Learning phase: not enough observations yet, explore randomly.
            if (_me.ObservationsSet.observations.Count < _me.ChangeStrategyPoint)
            {
                return Generator.getNextInt(NumberOfAgents);
            }

            List<double> competenceList = new List<double>(NumberOfAgents);
            List<double> epsilonList = new List<double>(NumberOfAgents);

            // Index 0 is this agent itself; its parameters are known directly.
            epsilonList.Add(_me.Epsilon);
            competenceList.Add(_me.Competence.Mean);

            // Remaining indices are teammates; estimate their parameters
            // from the shared observation history.
            for (int i = 1; i < NumberOfAgents; i++)
            {
                double[] data = ((AdaptiveTeammateModel)_me.getTeammateModel(i)).estimateEpsilonAndCometence(_me.ObservationsSet.observations);

                epsilonList.Add(data[0]);
                competenceList.Add(data[1]);
            }

            return ChooseBest(competenceList, epsilonList);
        }

        /// <summary>
        /// Solves the MDP for the given per-agent competence and epsilon
        /// estimates and returns the index of the best agent.
        /// </summary>
        /// <param name="competenceList">Estimated competence per agent (index 0 = self).</param>
        /// <param name="epsilonList">Estimated epsilon per agent (index 0 = self).</param>
        /// <returns>Index of the agent with the highest MDP value.</returns>
        protected int ChooseBest(List<double> competenceList, List<double> epsilonList)
        {
            List<double> result = tool.Solve(competenceList, epsilonList);
            return chooseMaxValue(result);
        }
    }
}
