﻿using System;
using System.Collections.Generic;
using AdaptiveAgents.Agents;
using AdaptiveAgents.Agents.Choosers;

namespace AdaptiveAgents.Agents.Choosers
{
    /// <summary>
    /// Chooses the next player according to an MDP that plugs the optimal (true)
    /// competence and epsilon values into its equations, rather than values
    /// estimated from observations.
    /// </summary>
    class MDPOptimalNextPlayerChooser : MDPDiscoverNextPlayerChooser
    {
        // Both lists hold the true (ground-truth) values, one entry per agent:
        // competences in 'comp', epsilons in 'eps'. Set once in the constructor.
        private readonly List<double> comp;
        private readonly List<double> eps;

        /// <summary>
        /// Creates the chooser and registers it as <paramref name="agent"/>'s
        /// <c>NextPlayerChooser</c>.
        /// </summary>
        /// <param name="agent">The agent this chooser decides for.</param>
        /// <param name="comp">True competence values used in the MDP equations.</param>
        /// <param name="eps">True epsilon values used in the MDP equations.</param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when <paramref name="agent"/>, <paramref name="comp"/> or
        /// <paramref name="eps"/> is null.
        /// </exception>
        public MDPOptimalNextPlayerChooser(Agent agent, List<double> comp, List<double> eps)
            : base(agent)
        {
            // Validate here so a null argument fails fast with a clear message
            // instead of surfacing later as a NullReferenceException in ChooseBest.
            if (agent == null)
            {
                throw new ArgumentNullException(nameof(agent));
            }
            agent.NextPlayerChooser = this;
            this.comp = comp ?? throw new ArgumentNullException(nameof(comp));
            this.eps = eps ?? throw new ArgumentNullException(nameof(eps));
        }

        /// <summary>
        /// Chooses the next performer according to the MDP with optimal (true)
        /// values in the equation variables. While still in the learning phase
        /// (fewer observations than <c>ChangeStrategyPoint</c>), picks an agent
        /// uniformly at random; afterwards delegates to <c>ChooseBest</c> with
        /// the true competence/epsilon values.
        /// </summary>
        /// <param name="maxToConsider">Ignored by this override; the choice is
        /// made over all agents in the environment.</param>
        /// <returns>Index of the next performer.</returns>
        public override int choose(int maxToConsider)
        {
            NumberOfAgents = _me.Environment.agents.Count;
            // Learning phase: not enough observations yet to switch strategy,
            // so explore by choosing uniformly at random.
            if (_me.ObservationsSet.observations.Count < _me.ChangeStrategyPoint)
            {
                return Generator.getNextInt(NumberOfAgents);
            }
            // Exploitation phase: use the ground-truth values directly.
            return ChooseBest(comp, eps);
        }
    }
}
