﻿using System;
using System.Collections.Generic;
using System.Text;
using AdaptiveAgents.Loggers;
using AdaptiveAgents.Agents;

namespace AdaptiveAgents.Games
{
    /// <summary>
    /// A game created for rectangle dist test
    /// </summary>
    /// <summary>
    /// A game created for rectangle dist test
    /// </summary>
    class RectangleTestGame : Game
    {
        public RectangleTestGame(Environment environment, int numOfRounds)
            : base(environment, numOfRounds)
        { }

        /// <summary>
        /// Runs the game for the configured number of rounds, letting one agent
        /// perform per round and having every agent observe the outcome.
        /// </summary>
        /// <param name="logger">Logger supplied by the caller (not used by this game).</param>
        /// <returns>Average utility of the players</returns>
        public override double start(Logger logger)
        {
            _accumUtility = 0; // running total of every round's performance

            // Pick the opening performer uniformly at random
            int currentPerformer = Generator.getNextInt(_environment.agents.Count);

            for (int turn = 0; turn < _numOfRounds; turn++)
            {
                // The active agent performs; its utility is added to the group total
                double turnUtility = _environment.agents[currentPerformer].performNow();
                _accumUtility += turnUtility;

                // Every agent (performer included) records an observation of this turn
                foreach (Agent observer in _environment.agents)
                {
                    observer.addObservation(new Observation(turn, _environment.agents[currentPerformer].ID,
                                            turnUtility));
                }

                // The active agent decides who performs in the next round
                int upcomingPerformer = chooseNextPerformer(currentPerformer);

                //QUESTION why not choosing next player and then add observation?

                // Back-fill the observation just recorded with the chosen successor
                foreach (Agent observer in _environment.agents)
                {
                    observer.updateLastObservationWithNextAgent(currentPerformer, upcomingPerformer);
                }

                // Hand the turn over
                currentPerformer = upcomingPerformer;
            }

            // Mean utility per round across the whole game
            double avgUtility = _accumUtility / _numOfRounds;

            return avgUtility;
        }


        /// <summary>
        /// Delegates the choice of the next performer to the currently active agent.
        /// </summary>
        /// <param name="activePlayer">the ID of the active player</param>
        /// <returns>ID of the next player according to active player decision</returns>
        protected override int chooseNextPerformer(int activePlayer)
        {
            return _environment.agents[activePlayer].chooseNextPerformer();
        }
    }
}
