﻿using System;
using System.Collections.Generic;
using System.Data.Objects.SqlClient;
using System.Runtime.Caching;
using System.ServiceModel;
using System.Transactions;
using Sern.Abstracts.Common;
using Sern.Abstracts.Tracker.DataContracts;
using Sern.Abstracts.Tracker.ServiceContracts;
using Sern.Common;
using Microsoft.Practices.Unity;
using System.Linq;
using Sern.ServiceImplementation.TrackerDefinition.Data;

namespace Sern.ServiceImplementation.TrackerDefinition.Service
{
    [ServiceBehavior(TransactionIsolationLevel = IsolationLevel.Snapshot, ConcurrencyMode = ConcurrencyMode.Multiple,ReleaseServiceInstanceOnTransactionComplete = false)]
    public partial class Tracker : ITracker
    {
        private const string FilterClientCountCacheKey =
            "Sern.ServiceImplementation.Tracker.TrackerService.AllFilterClientCount";

        private readonly TimeSpan _filterClientCountCacheSlidingExpiration = new TimeSpan(0, 0, 0, 10);


        /// <summary>
        /// Get the number of clients of type filter, past and present.
        /// The value is cached in <see cref="MemoryCache"/> with a short sliding expiration so
        /// the repository is not queried on every call.
        /// </summary>
        /// <remarks>
        /// Filters cannot be added or removed mid-job; even dead filters still participate in the
        /// hashing function, otherwise keys would be hard to match back to the same filter.
        /// </remarks>
        private int AllFilterClientCount
        {
            get
            {
                // Capture the cache entry once. The previous implementation read the cache twice
                // (null check, then cast on a second lookup); a sliding-expiration eviction
                // between the two reads would make the final unboxing cast throw
                // NullReferenceException. A single captured read cannot race with eviction.
                var cached = MemoryCache.Default[FilterClientCountCacheKey];
                if (cached != null)
                    return (int) cached;

                var filterCount = (from client in ComponentLocator.Unity.Resolve<IRepository<Client>>()
                                       .Records.OfType<ClientStatusInfo>()
                                   where client.Type == (int) ClientType.Filter
                                   select client).Count();

                MemoryCache.Default.Add(FilterClientCountCacheKey,
                    filterCount,
                    new CacheItemPolicy
                    {
                        SlidingExpiration = _filterClientCountCacheSlidingExpiration
                    });

                // Return the value we just computed instead of re-reading the cache,
                // which could already have been evicted again.
                return filterCount;
            }
        }

        /// <summary>
        /// The jobId associated with this tracker.
        /// </summary>
        /// <remarks>
        /// Deprecated with error=true (any use is a compile error): per-instance state prevents
        /// load balancing, so the service is being made stateless/pass-through instead.
        /// </remarks>
        [Obsolete("State is not scaleable. Make it stateless and transparent aka. pass through so it can be load balanced.", true)]
        public Guid JobId { get; private set; }

        /// <summary>
        /// Extract the jobId from a uri path: the id is the second-to-last segment once the
        /// path is split on '.' and '/'.
        /// </summary>
        /// <param name="uriPath">The uri path to parse.</param>
        /// <returns>The parsed job id.</returns>
        /// <remarks>
        /// We are just gonna make it a singleton, and the ID can come from local state instead.
        /// That'll reduce the need for a more complicated schema like this.
        /// </remarks>
        [Obsolete("",true)]
        public static Guid GetJobIdFromUri(string uriPath)
        {
            var separators = new[] {'.', '/'};
            var segments = uriPath.Split(separators, StringSplitOptions.RemoveEmptyEntries);

            // The job id sits just before the final segment, hence reverse then skip one.
            var jobIdSegment = segments.Reverse().Skip(1).First();

            return Guid.Parse(jobIdSegment);
        }

        /// <summary>
        /// Build the job Uri for the given jobId.
        /// </summary>
        /// <param name="jobId">The job identifier.</param>
        /// <returns>A relative uri of the form "Tracker/Job/{id:N}".</returns>
        [Obsolete("", true)]
        public static Uri CreateUriFromJobId(Guid jobId)
        {
            // "Tracker/Job/..." is not an absolute uri; the single-argument Uri(string)
            // constructor requires an absolute uri and would throw UriFormatException at
            // runtime, so the kind must be stated explicitly.
            return new Uri("Tracker/Job/" + jobId.ToString("N"), UriKind.Relative);
        }

        /// <summary>
        /// Get the client info appropriate for the given parameters.
        /// </summary>
        /// <param name="jobId">Job the client must belong to.</param>
        /// <param name="type">Type of client being looked up.</param>
        /// <param name="keyHash">Hash of the key to route.</param>
        /// <param name="keyRedundancyHash">Redundancy hash of the key.</param>
        /// <param name="keyShape">Shape of the keys the client must accept.</param>
        /// <param name="valueShape">Shape of the values the client must accept.</param>
        /// <returns>The matching client's info, or null when no client qualifies.</returns>
        /// <remarks>
        /// An alternative to the affinity would be to hash out the keyHash and figure out
        /// the loop number the keyHash is running on and cross that against all the clients
        /// available for processing at that loop. Mod the hash by the number of clients and
        /// sticky all such keys to that client. The downside is iterating through the number of
        /// loops, or exchanging the keyHash history information with the request. Both have their
        /// own performance bottlenecks and constraints as the number of loops increases.
        /// </remarks>
        [OperationBehavior(TransactionScopeRequired = true, TransactionAutoComplete = true)]
        public ClientInfo FindActiveClientInfo(Guid jobId, ClientType type, Guid keyHash, Guid keyRedundancyHash, string keyShape, string valueShape)
        {
            var clients = ComponentLocator.Unity.Resolve<IRepository<Client>>();
            var tasks = ComponentLocator.Unity.Resolve<IRepository<KeyAtClientLog>>();

            var match = FindMatchingClient2(
                jobId,
                keyHash,
                keyRedundancyHash,
                keyShape,
                valueShape,
                TrackerCommon.LastValidActivityTime,
                tasks,
                clients,
                (int) type);

            if (match == null)
                return null;

            // Project the persisted record into the wire-level data contract.
            return new ClientInfo
            {
                JobId = match.JobId,
                Id = match.Id,
                KeyShape = match.KeyShape,
                ValueShape = match.ValueShape,
                Uri = match.Uri,
                Type = (ClientType) match.Type,
                HostUri = match.ClientHostUri
            };
        }

        #region Obsolete code
        /// <summary>
        /// Original single-query client selection: join clients to their task log, filter by
        /// type/job/shape and liveness (filters get sticky-key affinity), then pick the client
        /// with the highest (completed - processing) task count.
        /// </summary>
        /// <remarks>
        /// Kept for reference only; superseded by FindMatchingClient2.
        /// NOTE(review): ends with .First(), so this throws InvalidOperationException when no
        /// client matches instead of returning null.
        /// </remarks>
        [Obsolete("Too slow also potentially prone to concurrency issues. Here for reference purposes.")]
        private static ClientStatusInfo GetMatchingClient(Guid jobId, Guid key, string keyShape, string valueShape,
                                                          DateTime lastActivityCutOffPoint, IRepository<KeyAtClientLog> taskRepository,
                                                          IRepository<ClientStatusInfo> clientRepository, int clientTypeId)
        {
            var matchingClient
                = (from client in clientRepository.Records
                   join task in taskRepository.Records
                       on client.Id equals task.ClientId
                       into clientTask
                   where client.Type == clientTypeId
                         && client.JobId == jobId
                         && client.KeyShape == keyShape
                         && client.ValueShape == valueShape
                         //Filter has a keyHash affinity, so if one was created for that keyHash, subsequent request will go to the same client - no matter how
                         //(either this client already saw the key, or no client anywhere is currently processing it).
                         && (client.Type == (int) ClientType.Filter
                             && (clientTask.Any(_ => _.KeyId == key)
                                 || !clientRepository
                                         .Records
                                         .Join(taskRepository.Records,
                                               _ => _.Id,
                                               _ => _.ClientId,
                                               (_, __) => __
                                         ).Any(_ =>
                                               _.KeyId == key
                                               && _.State == (int) ProcessingStage.Processing)
                                ) ||
                             //Otherwise if we are not looking  at the filter phase then we must make sure that the client is active.
                             client.EndOfSubscription == null
                             && (client.StartOfSubscription >= lastActivityCutOffPoint
                                 || client.LastAlivePing >= lastActivityCutOffPoint)
                            )
                   //Order by descending (completed - processing). 0 will be the max if the client is not working.
                   orderby clientTask.Count(t => t.State == (int) ProcessingStage.Completed)
                           - clientTask.Count(t => t.State == (int) ProcessingStage.Processing)
                       descending
                   select client).First();
            return matchingClient;
        }
        #endregion

        /// <summary>
        /// Select the best uri to process information with signature as given in the parameters. For filter will have sticky keyHash ie. the same keyHash will always go to the same filter.
        /// For this reason the number of filter MUST stay constant while the job is running! It can only be modified at the completion!
        /// </summary>
        /// <param name="jobId">Job the candidate client must belong to.</param>
        /// <param name="key">Key hash used for filter stickiness.</param>
        /// <param name="keyRedundancyHash">For no redundancy the keyhash should be identical to the key, for non map reduce type of client eg. telemetry or logger, keyhash is Guid.Empty</param>
        /// <param name="keyShape">Key shape the client must accept.</param>
        /// <param name="valueShape">Value shape the client must accept.</param>
        /// <param name="lastActivityCutOffPoint">Oldest activity timestamp still considered alive.</param>
        /// <param name="taskRepository">Repository of key-at-client log entries.</param>
        /// <param name="clientRepository">Repository of clients.</param>
        /// <param name="clientTypeId">Numeric value of the requested ClientType.</param>
        /// <returns>The best matching client status record, or null when none qualifies.</returns>
        private ClientStatusInfo FindMatchingClient2(Guid jobId, Guid key, Guid keyRedundancyHash, string keyShape, string valueShape,
                                                          DateTime lastActivityCutOffPoint, IRepository<KeyAtClientLog> taskRepository,
                                                          IRepository<Client> clientRepository, int clientTypeId)
        {
            // Candidate clients matching type/job/shape (filters are exempt from the liveness
            // check because their key affinity must survive even if the client looks dead),
            // grouped per client id with their (possibly empty) task log.
            var matchingClientQuery
                = from client in clientRepository.Records.OfType<ClientStatusInfo>()
                  join task in taskRepository.Records
                      on client.Id equals task.ClientId
                      into clientTasks
                  from clientTask in clientTasks.DefaultIfEmpty()
                  where client.Type == clientTypeId
                        && client.JobId == jobId
                        && client.KeyShape == keyShape
                        && client.ValueShape == valueShape
                        //Filter has a keyHash affinity, so if one was created for that keyHash, subsequent request will go to the same client - no matter how even if the client was known to be down!
                        && (client.Type == (int) ClientType.Filter
                            //Otherwise if we are not looking  at the filter phase then we must make sure that the client is active.
                            || client.EndOfSubscription == null
                            && (client.StartOfSubscription >= lastActivityCutOffPoint
                                || client.LastAlivePing >= lastActivityCutOffPoint)
                           )
                //TODO: Depending on query's performance you might need to run this grouping on server rather than db.
                  group new {client, clientTask} by client.Id
                  into clientTaskAndKey

                  //NOTE(review): First() inside a grouped projection may not translate on every
                  //LINQ provider — confirm against the EF version in use. taskAndKey is not
                  //consumed downstream (only .client is read after the final ordering).
                  select new {clientTaskAndKey.First().client, taskAndKey = clientTaskAndKey.Select(_ => _.clientTask)};


            //If the client uri to search for is of type filter, they we need to enable sticky keyHash.
            if(clientTypeId == (int) ClientType.Filter)
            {
                int hash = GetFilterHash(key);
                //The job id is included in the ordering for better distribution of resources. The fear is that due to keyHash affinity one filter would be doing all the job.
                matchingClientQuery = (from _ in matchingClientQuery
                                    //We're expecting the client id that equals to offset to shopw up Const.MinRedundancy times.
                                       where _.client.IdHash % (AllFilterClientCount > Constant.MinRedundancy ? (AllFilterClientCount / Constant.MinRedundancy) : Constant.MinRedundancy) == hash
                                       orderby _.client.JobId, _.client.Id
                                       select _);//.Skip(offset);  
                
            }

            //Join with the aggregate and show.
            //Per host: how many log entries already carry this redundancy key (left join so
            //hosts with no entries count as zero).
            var redundancyCount =
                from client in clientRepository.Records
                from keyLog in taskRepository.Records.Where(t => t.ClientId == client.Id && t.RedundancyKey == keyRedundancyHash).DefaultIfEmpty()                    
                group keyLog by new {client.ClientHostUri, RedundancyKey = keyLog != null ? keyLog.RedundancyKey ?? Guid.Empty : Guid.Empty}
                into keyLogGroup
                select
                    new
                        {
                            keyLogGroup.Key.ClientHostUri,
                            RedundancyCount = keyLogGroup.Count(k => k != null)
                        };

            //Per client: how many tasks are currently in the Processing state.
            var usageCount =

                from client in clientRepository.Records
                from keyLog in taskRepository.Records.Where(t => t.ClientId == client.Id && t.State == (int) ProcessingStage.Processing).DefaultIfEmpty()
                group keyLog by keyLog != null ? keyLog.ClientId : Guid.Empty
                into keyLogGroup
                select new {ClientId = keyLogGroup.Key, BusyCount = keyLogGroup.Count(k => k != null)};

            //Otherwise maximize redundancy by picking different client 
            //(prefer hosts that have seen this redundancy key least, then the least busy client).
            matchingClientQuery = from _ in matchingClientQuery
                                  from ruc in redundancyCount.Where(ruc => _.client.ClientHostUri == ruc.ClientHostUri).DefaultIfEmpty()
                                  from uc in usageCount.Where(uc => uc.ClientId == _.client.Id).DefaultIfEmpty()
                                  orderby //_.clientTask.Count(t => t.State == (int)ProcessingStage.Completed) //<-- Causes circular logic with completed being dependent on the the assignment etc. Potentially a client with 1000 completed and 100 assignment would always be favored than a client with 500 completed and 1 assignment. And it keeps getting skewed each time.
                                        ruc == null ? 0 : ruc.RedundancyCount,
                                        uc == null ? 0 : uc.BusyCount
                                  select _;

            var matchingClient = matchingClientQuery.FirstOrDefault();
            
            return matchingClient != null ? matchingClient.client : null;            
        }


        /// <summary>
        /// Add a new client to the tracker registry.
        /// </summary>
        /// <param name="jobId">Job the client will participate in.</param>
        /// <param name="clientId">Unique id of the subscribing client.</param>
        /// <param name="clientType">Type of the client (e.g. filter).</param>
        /// <param name="clientUri">Endpoint uri of the client itself.</param>
        /// <param name="keyShape">Shape of the keys the client processes.</param>
        /// <param name="valueShape">Shape of the values the client processes.</param>
        /// <param name="clientHostUri">Uri of the host the client runs on.</param>
        /// <returns></returns>
        /// <remarks>
        /// Pure pass-through to TrackerCommon.Subscribe; runs inside the caller's transaction
        /// (TransactionScopeRequired, auto-complete).
        /// Should probably prevent subscription while a job is in progress and the stream isn't completely cleared yet.
        /// </remarks>
        [OperationBehavior(TransactionScopeRequired = true, TransactionAutoComplete = true)]
        public void Subscribe(Guid jobId, Guid clientId, ClientType clientType, string clientUri, string keyShape, string valueShape, string clientHostUri)
        {
            TrackerCommon.Subscribe(jobId, clientId, clientType, clientUri, keyShape, valueShape, clientHostUri);
        }

        

        /// <summary>
        /// Unsubscribe a client.
        /// </summary>
        /// <param name="clientId">Id of the client to remove from the registry.</param>
        /// <remarks>
        /// Pure pass-through to TrackerCommon.Unsubscribe; runs inside the caller's transaction.
        /// We should probably throw exception or ignore request to remove filter while the job stream isn't completely flushed!
        /// </remarks>
        [OperationBehavior(TransactionScopeRequired = true, TransactionAutoComplete = true)]
        public void Unsubscribe(Guid clientId)
        {
            TrackerCommon.Unsubscribe(clientId);
        }

        /// <summary>
        /// Redundancy Id is a hash that uniquely identifies via folding and tracking technique, how many identically redundant message
        /// has been sent. A message with the same keyHash is a duplicate of each other. A certain min is required so that the network can "self-heal"
        /// 
        /// </summary>
        /// <param name="jobId">Job whose filters are examined (filter case only).</param>
        /// <param name="key">Key being tracked.</param>
        /// <param name="redundancyKey">Redundancy hash identifying duplicates of the same message.</param>
        /// <param name="targetClientType">Type of client the message is destined for.</param>
        /// <returns>True when the minimum redundancy requirement is met for the target type.</returns>
        [OperationBehavior(TransactionScopeRequired = true, TransactionAutoComplete = true)]
        public bool IsMinimumRedundancySatisfied(Guid jobId, Guid key, Guid redundancyKey, ClientType targetClientType)
        {
            // Base requirement: at least MinRedundancy log entries with this redundancy key
            // currently in the Processing state.
            var keyRecords = ComponentLocator.Unity.Resolve<IRepository<KeyAtClientLog>>().Records;
            var isMinimumRedundancyMet = Constant.MinRedundancy <= keyRecords.Count(_ => _.RedundancyKey == redundancyKey && _.State == (int) ProcessingStage.Processing);
                        
            if (targetClientType != ClientType.Filter)
            {
                return isMinimumRedundancyMet;
            }
            //target type filter is only satisfied when all filter that matches the criteria are filled in! You can send more than one to each, but you gotta send once to all.
            else
            {
                var clientRepository = ComponentLocator.Unity.Resolve<IRepository<Client>>();
                var taskRepository = ComponentLocator.Unity.Resolve<IRepository<KeyAtClientLog>>();
                //var keyRepository = ComponentLocator.Unity.Resolve<IRepository<KeyTree>>();
                int hash = GetFilterHash(key);

                // Group the filters responsible for this key's hash bucket with their
                // Processing/Completed log entries for the key (left join: a filter with no
                // entries yields a single null clientTask).
                // NOTE(review): the double-negated Any below evaluates to "some responsible
                // filter has entries for this key but none with this redundancyKey" — that
                // reads inverted relative to the 'all filters filled' intent stated above;
                // confirm the intended semantics before relying on this method.
                var isAtLeastOnePerFilter = (from client in clientRepository.Records.OfType<ClientStatusInfo>()
                                             join task in taskRepository.Records.Where(
                                                _ => (_.State == (int)ProcessingStage.Processing ||
                                                    _.State == (int)ProcessingStage.Completed)
                                                    && _.KeyId == key
                                             )
                                                 on client.Id equals task.ClientId
                                                 into clientTasks
                                             from clientTask in clientTasks.DefaultIfEmpty()
                                             //from taskKey in keyRepository.Records.Where(_ => _.Id == clientTask.KeyId).DefaultIfEmpty()
                                             where
                                                 client.Type == (int)ClientType.Filter
                                                 &&
                                                 client.IdHash %
                                                 (AllFilterClientCount > Constant.MinRedundancy
                                                      ? (AllFilterClientCount / Constant.MinRedundancy)
                                                      : Constant.MinRedundancy) == hash
                                                 && client.JobId == jobId
                                             group new { client, clientTask } by client.Id
                                                 into clientTaskAndKey
                                                 select
                                                     new
                                                     {
                                                         clientTaskAndKey.Key,
                                                         taskAndKey = clientTaskAndKey.Select(_ => new { _.clientTask })
                                                     })
                                                 .Any(
                                                     _ =>
                                                     !_.taskAndKey.Any(
                                                         __ => __.clientTask == null || __.clientTask.RedundancyKey == redundancyKey));

                return isAtLeastOnePerFilter && isMinimumRedundancyMet;
            }
        }

        /// <summary>
        /// Map a key to its filter bucket: the key's hash code modulo the number of filter
        /// buckets (filter count divided by the redundancy minimum, floored at the minimum).
        /// </summary>
        /// <param name="key">Key to bucket.</param>
        /// <returns>The bucket index, compared against client.IdHash % bucketCount by callers.</returns>
        /// <remarks>
        /// NOTE(review): Guid.GetHashCode() can be negative, so the C# remainder can be negative
        /// too — the comparison sides must keep using the same signed remainder semantics.
        /// </remarks>
        private int GetFilterHash(Guid key)
        {
            var bucketCount = AllFilterClientCount > Constant.MinRedundancy
                                  ? (AllFilterClientCount / Constant.MinRedundancy)
                                  : Constant.MinRedundancy;

            return key.GetHashCode() % bucketCount;
        }


        /// <summary>
        /// Find the root keys (ParentKey == null) whose processing has timed out: a key counts as
        /// timed out when it has an Initiated task older than <paramref name="timeOutSpan"/> and
        /// no Completed task. Timed-out child keys are walked up their ParentKey chain to roots.
        /// </summary>
        /// <param name="timeOutSpan">How long an Initiated task may be outstanding before it is considered timed out.</param>
        /// <returns>The ids of the timed-out root keys.</returns>
        [Obsolete("Not needed, there are better wayd to do this", true)]
        public IEnumerable<Guid> GetTimedOutRootKeys(TimeSpan timeOutSpan)
        {
            var taskRepository = ComponentLocator.Unity.Resolve<IRepository<KeyAtClientLog>>();
            var keyRepository = ComponentLocator.Unity.Resolve<IRepository<KeyTree>>();


            var timedOutTasks = (from key in keyRepository.Records
                                 join task in taskRepository.Records
                                     on key.Id equals task.KeyId
                                     into keyTask
                                 where !keyTask.Any(_ => _.State == (int)ProcessingStage.Completed)
                                     && keyTask.Any(_ => _.State == (int)ProcessingStage.Initiated
                                                         // BUG FIX: was "_.TimeStamp - DateTime.UtcNow > timeOutSpan",
                                                         // which is negative for any past timestamp and therefore
                                                         // could never exceed a positive timeout. Age = now - timestamp.
                                                         && DateTime.UtcNow - _.TimeStamp > timeOutSpan
                                             )
                                 select key).ToArray();

            // Split the timed-out keys into roots (done) and children (to be traced upward).
            IEnumerable<KeyTree> childTimedOutTasks = timedOutTasks.Where(_ => _.ParentKey != null).ToArray();

            IEnumerable<KeyTree> rootTimedOutTasks = timedOutTasks.Where(_ => _.ParentKey == null).ToArray();

            //Trace the parent keyHash from the subkeys, one level per iteration, until only
            //roots remain.
            while(childTimedOutTasks.Any())
            {
                var temp =  (from key in keyRepository.Records
                             //TODO: Double check if this will work out coz the query provider will have to interprete this rather complex expression.
                            where childTimedOutTasks.Select(_ => _.ParentKey).Contains(key.Id)
                            select key).ToArray();

                rootTimedOutTasks = rootTimedOutTasks.Concat(temp.Where(_ => _.ParentKey == null));
                childTimedOutTasks = temp.Where(_ => _.ParentKey != null);
            }

            return rootTimedOutTasks.Select(_ => _.Id);
        }

        
    }
}
