﻿using System;
using System.Collections.Generic;
using AdaptiveAgents.Agents;
using AdaptiveAgents.Loggers;
using AdaptiveAgents.Distributions;
using AdaptiveAgents.TeammateModels;

namespace AdaptiveAgents.Agents
{
    /// <summary>
    /// Agent that participates in games. Each agent holds a competence
    /// distribution, accumulates observations about its teammates, and
    /// delegates the choice/prediction of the next performer to pluggable
    /// strategy objects (<see cref="NextPlayerChooser"/> / <see cref="NextPlayerPredicter"/>).
    /// </summary>
    public class Agent 
    {
        #region Data members

        private int id;                      //Agent ID
        private IDistribution distribution;  //The competence of the agent

        private double epsilon;              //Exploration rate
        private int memoryPerAgent;          //Number of observations saved per agent when estimating competence
        private int numActions;              //Actions performed so far
        private int changeStrategyPoint = 0; //Game round from which the "choose next player" strategy is replaced

        private Environment environment;     //Environment the agent belongs to
        private ObservationsSet observations;
        private NextPlayerPredicter nextPlayerPredicter;
        private NextPlayerChooser nextPlayerChooser;
        private List<TeammateModel> teammateModels;
        private Logger logger;

        /// <summary>
        /// Gets or sets the teammate models held by this agent.
        /// </summary>
        public List<TeammateModel> TeammateModels
        {
            get { return teammateModels; }
            set { teammateModels = value; }
        }

        #endregion
        
        #region Methods

        /// <summary>
        /// Gets the observations of another agent by its ID.
        /// </summary>
        /// <param name="ID">ID of the other agent</param>
        /// <param name="memory">Maximum number of observations to return</param>
        /// <returns>Observations of the agent with the given ID, limited to <paramref name="memory"/> entries</returns>
        public ObservationsSet getObservationByID(int ID, int memory)
        {
            // BUGFIX: previously ignored the 'memory' parameter and always
            // passed the memoryPerAgent field, making the parameter dead.
            return observations.getObservationsByAgentID(ID, memory);
        }
        
        /// <summary>
        /// Chooses the next agent to perform, using this agent's own
        /// memory limit (<see cref="Memory"/>).
        /// </summary>
        /// <returns>ID of the next performer</returns>
        public int chooseNextPerformer() 
        {
            return chooseNextPerformer(memoryPerAgent); 
        }

        /// <summary>
        /// Chooses the next agent to perform via the configured chooser strategy.
        /// </summary>
        /// <param name="maxToConsider">Max memory of agent to consider</param>
        /// <returns>ID of the next performer</returns>
        public int chooseNextPerformer(int maxToConsider)
        {
            return nextPlayerChooser.choose(maxToConsider);
        }

        /// <summary>
        /// Predicts the next agent to perform via the configured predicter strategy.
        /// CURRENTLY NOT IN USE 22/06/2009
        /// </summary>
        /// <returns>IDs of the predicted next performers</returns>
        public List<int> predictNextPerformer()
        {
            return nextPlayerPredicter.predict();
        }

        /// <summary>
        /// Constructor. Starts with an empty observation set, zero actions,
        /// and no teammate models (see <see cref="initializeTeammateModels"/>).
        /// </summary>
        /// <param name="id">Agent ID</param>
        /// <param name="environment">Environment the agent belongs to</param>
        /// <param name="eps">Epsilon - exploration factor</param>
        /// <param name="logger">Logger that reports about the agent</param>
        public Agent(int id, Environment environment, double eps, Logger logger)
        {
            this.id = id;
            this.environment = environment;
            this.epsilon = eps;
            observations = new ObservationsSet();
            this.numActions = 0;
            teammateModels = new List<TeammateModel>();
            this.logger = logger;
        }

        /// <summary>
        /// Initializes the agent's teammate models, one per agent in the
        /// environment (including this agent itself).
        /// </summary>
        /// <param name="numOfAgents">Number of agents in the environment</param>
        public void initializeTeammateModels(int numOfAgents)
        {
            //Scan each agent and create a new teammate model
            for (int i = 0; i < numOfAgents; i++)
            {
                Agent ag = environment.agents[i];
                teammateModels.Add(new TeammateModel(this, ag, environment));
            }
        }

        /// <summary>
        /// Gets a teammate model by an agent ID.
        /// </summary>
        /// <param name="ID">ID of an agent</param>
        /// <returns>Teammate model of the agent with the specified ID;
        /// falls back to the first model (after asserting) if none matches</returns>
        public TeammateModel getTeammateModel(int ID)
        {
            foreach (TeammateModel teammate in teammateModels)
            {
                if (teammate.getID() == ID)
                    return teammate;
            }
            // Unknown ID: report and assert in debug builds; in release,
            // fall through to an arbitrary model so callers keep running.
            System.Console.Error.WriteLine("No teammate with that ID");
            System.Diagnostics.Debug.Assert(false);
            
            return teammateModels[0]; //arbitrary return - shouldn't get here
        }

        /// <summary>
        /// Prints a single line description of the agent.
        /// Currently a no-op (reporting is disabled).
        /// </summary>
        public void printRecord()
        {
            //Reporting intentionally disabled; re-enable via this.logger if needed.
        }

        /// <summary>
        /// The agent gets to perform (was chosen to perform).
        /// Increments the action counter and samples the competence distribution.
        /// </summary>
        /// <returns>The performance of the agent</returns>
        public double performNow()
        {
            ++numActions;
            return distribution.generateValue();
        }

        /// <summary>
        /// Records a new observation.
        /// Kept separate from updateLastObservationWithNextAgent so the most
        /// recent observation can be used when choosing the next performer.
        /// </summary>
        /// <param name="observation">Observation to add</param>
        public void addObservation(Observation observation)
        {
            observations.addObservation(observation);
        }

        /// <summary>
        /// Updates the last observation with the next agent and refreshes the
        /// active agent's epsilon estimate in its teammate model.
        /// </summary>
        /// <param name="activeAgent">The current active agent in the world</param>
        /// <param name="nextAgent">Next agent to be active</param>
        /// <returns>True if the active agent is acting as expected</returns>
        public bool updateLastObservationWithNextAgent(int activeAgent, int nextAgent)
        {
            observations.updateNextAgent(nextAgent);
            return getTeammateModel(activeAgent).updateEpsilon(nextAgent);
        }

        #endregion

        #region Properties
        
        /// <summary>
        /// Gets the observation set of the agent
        /// </summary>
        public ObservationsSet ObservationsSet
        {
            get { return observations; }
        }
        
        /// <summary>
        /// Gets total actions made by the agent
        /// </summary>
        public int NumActions
        {
            get { return numActions; }
        }

        /// <summary>
        /// Sets the predicter of the next player to perform (set-only)
        /// </summary>
        public NextPlayerPredicter NextPlayerPredicter
        {
            set { nextPlayerPredicter = value; }
        }
        
        /// <summary>
        /// Sets the chooser of the next player to perform (set-only)
        /// </summary>
        public NextPlayerChooser NextPlayerChooser
        {
            set { nextPlayerChooser = value; }
        }
        
        /// <summary>
        /// Gets the agent's ID
        /// </summary>
        public int ID
        {
            get { return id; }
        }
        
        /// <summary>
        /// Gets the environment which the agent belongs to
        /// </summary>
        public Environment Environment
        {
            get { return environment; }
        }
        
        /// <summary>
        /// Gets or Sets the memory (observations kept per agent) of the agent
        /// </summary>
        public int Memory
        {
            get { return this.memoryPerAgent; }
            set { this.memoryPerAgent = value; }
        }

        /// <summary>
        /// Gets the exploration factor of the agent
        /// </summary>
        public double Epsilon
        {
            get { return epsilon; }
        }

        /// <summary>
        /// Gets and Sets a distribution for the agent's competence
        /// </summary>
        public IDistribution Competence
        {
            get { return distribution; }
            set { distribution = value; }
        }

        /// <summary>
        /// Gets and Sets the game round from which the agent changes strategy
        /// </summary>
        public int ChangeStrategyPoint
        {
            get { return changeStrategyPoint; }
            set { changeStrategyPoint = value; }
        }

        #endregion
    }

}
