// License Notification
//======================================================
// Copyright (c) 2010 Channel Intelligence Inc.
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the Eclipse Public License v1.0
// which accompanies this distribution, and is available at
// http://www.eclipse.org/legal/epl-v10.html
//
// ======================================================




using System;
using System.Collections.Generic;
using CIP4.Common;
using CIP4.Item;

namespace CIP4.Simple
{
    [CIP4Item(AddAllProperties=true)]
    public class CIP4ScalablePropertyType<T>
    {
        // This class is not implemented as an item well, because an item well acts more like a dictionary,
        // whereas we needed something that acted more like an ordered sequence of values.
        // Making an item well behave this way would have required changing the interface to all other item wells
        // in ways that didn't make sense and would not have been supportable.
        // We optimize here for sequential reads (forward from the front and backward from the end),
        // not random access using keys.

        // Returns the external identifier of the object that owns this scalable property.
        public delegate string GetExternalIDDelegate ();

        // Returns true when the supplied value matches the caller's criteria
        // (used by the removal/search helpers, e.g. RemoveFirst, later in this class).
        public delegate bool FilterDelegate (T value);

        // Backing field for OwnerExternalIdentifier; only meaningful when no delegate was supplied.
        private string _ownerExternalIdentifier;

        // When non-null, reads of OwnerExternalIdentifier are routed through this delegate
        // and the property setter throws.
        private readonly GetExternalIDDelegate _ownerExternalIdentifierDelegate;

        /// <summary>
        /// The external identifier of the owning object.
        /// When a delegate was supplied at construction time, reads are routed through it
        /// and any attempt to write the property throws.
        /// </summary>
        public string OwnerExternalIdentifier 
        {
            get
            {
                if (_ownerExternalIdentifierDelegate != null)
                {
                    return _ownerExternalIdentifierDelegate ();
                }

                return _ownerExternalIdentifier;
            }

            set
            {
                if (_ownerExternalIdentifierDelegate != null)
                {
                    throw new ApplicationException ("Not allowed to set the external identifier of the owner when the delegate is present.");
                }

                _ownerExternalIdentifier = value;
            }
        }

        /// <summary>
        /// The name of the collection in which this property's overflow chunks are stored.
        /// (Presumably consumed by the chunk load/save helpers elsewhere in this class — confirm there.)
        /// </summary>
        public string CollectionName { get; set; }

        /// <summary>
        /// The maximum number of values that are allowed in a chunk, in FrontList, or in BackList.
        /// Must be a positive integer.
        /// </summary>
        public int MaximumChunkSize { get; set; }

        /// <summary>
        /// The number of modified chunks that can accumulate in memory before being flushed to the underlying store.
        /// </summary>
        public int DirtyChunkThreshold { get; set; }

        /// <summary>
        /// Holds the first few (up to MaximumChunkSize) values embedded in this scalable property object for fast access.
        /// </summary>
        public List<T> FrontList { get; set; }

        /// <summary>
        /// Holds the last few (up to MaximumChunkSize) values embedded in this scalable property object for fast access.
        /// </summary>
        public List<T> BackList { get; set; }

        /// <summary>
        /// The number of values currently stored inside this scalable property object
        /// (embedded lists plus all overflow chunks).
        /// </summary>
        public long Count { get; set; }

        /// <summary>
        /// The identifier of the chunk that follows the FrontList, or NullChunkIdentifier if there are no overflow chunks.
        /// </summary>
        public string FirstOverflowChunkIdentifier { get; set; }

        /// <summary>
        /// The identifier of the chunk that precedes BackList, or NullChunkIdentifier if there are no overflow chunks.
        /// </summary>
        public string LastOverflowChunkIdentifier { get; set; }

        /// <summary>
        /// Parameterless constructor (used by deserialization).
        /// Establishes sane defaults; the deserializer will typically overwrite them afterwards.
        /// </summary>
        public CIP4ScalablePropertyType ()
        {
            FrontList = new List<T> ();
            BackList = new List<T> ();
            Count = 0;
            FirstOverflowChunkIdentifier = NullChunkIdentifier;
            LastOverflowChunkIdentifier = NullChunkIdentifier;
            MaximumChunkSize = 10;
            DirtyChunkThreshold = 5;
        }

        /// <summary>
        /// Constructs an empty scalable property bound to a chunk store.
        /// </summary>
        /// <param name="chunkConnection">Connection information used to store and load overflow chunks.</param>
        /// <param name="getExternalIdentifierDelegate">Delegate that yields the owner's external identifier;
        /// when supplied, OwnerExternalIdentifier becomes read-only.</param>
        /// <param name="collectionName">The name of the collection that holds the overflow chunks.</param>
        /// <param name="maxChunkSize">Maximum number of values per chunk (and in FrontList/BackList).  Must be a positive integer.</param>
        /// <param name="dirtyChunkThreshold">Number of dirty chunks allowed to accumulate in memory before an automatic flush.</param>
        /// <exception cref="ArgumentOutOfRangeException">Thrown when maxChunkSize is not a positive integer.</exception>
        public CIP4ScalablePropertyType (
            CIP4ConnectionAttribute chunkConnection, 
            GetExternalIDDelegate getExternalIdentifierDelegate, 
            string collectionName, 
            int maxChunkSize,
            int dirtyChunkThreshold )
        {
            // MaximumChunkSize is documented as "Must be a positive integer"; enforce that here
            // rather than failing obscurely later (a non-positive size would make every Append/Prepend
            // immediately evict and churn chunks).
            if (maxChunkSize < 1)
            {
                throw new ArgumentOutOfRangeException ("maxChunkSize", "maxChunkSize must be a positive integer.");
            }

            ChunkConnection = chunkConnection;
            _ownerExternalIdentifierDelegate = getExternalIdentifierDelegate;
            CollectionName = collectionName;
            MaximumChunkSize = maxChunkSize;
            DirtyChunkThreshold = dirtyChunkThreshold;
            FrontList = new List<T> ();
            BackList = new List<T> ();
            FirstOverflowChunkIdentifier = NullChunkIdentifier;
            LastOverflowChunkIdentifier = NullChunkIdentifier;
            Count = 0;
        }

        /// <summary>
        /// Connection information for the store that holds this property's overflow chunks.
        /// </summary>
        [CIP4Property (SerializeTarget = true)]     // !!! FIXFIXFIX:  Migrating the scalable property will NOT migrate the chunks - how do we fix this?
        public CIP4ConnectionAttribute ChunkConnection { get; set; }

        // Overflow chunks that have been modified in memory but not yet saved, keyed by chunk external identifier.
        // Also doubles as the lock object guarding flush operations (see Flush/FlushIfNeeded/InternalFlush).
        private readonly Dictionary<string, ChunkType<T>> _dirtyChunks = new Dictionary<string, ChunkType<T>>();

        // NOTE(review): static, so this cache is shared by every instance of the same closed generic type
        // (one dictionary per T).  Confirm that cross-instance sharing is intentional and that access is synchronized.
        private static readonly Dictionary<string, StrongWrapper<ChunkType<T>>> ChunkWrapper = new Dictionary<string, StrongWrapper<ChunkType<T>>>();

        /// <summary>
        /// Appends a value to the end of the ordered sequence.
        /// This method makes no attempt to replace an existing value; 
        /// the value is always appended even if it already appears in the ordered sequence (scalable property).
        /// The caller is responsible for serializing and storing the scalable property after one or more calls to Append.
        /// </summary>
        /// <param name="value">The value to be inserted at the end of the ordered sequence.</param>
        /// <param name="wrapperBase">Wrapper used to obtain the serializer for loading/saving overflow chunks.</param>
        public void Append (T value, StrongWrapperBase wrapperBase)
        {
            ISerializerType baseSerializer = wrapperBase.GetSerializer (ChunkConnection.SerializerKind);
            
            LoggerDelegate logger = baseSerializer.Logger;

            // PrepareWrapper does important sanity checks as a side effect
            StrongWrapper<ChunkType<T>> wrapper = PrepareWrapper(baseSerializer);

            BackList.Add (value);       // go ahead and append the value to BackList.  Then see if BackList is too big...
            ++Count;

            if (BackList.Count > MaximumChunkSize)
            {
                // The BackList is now bigger than the maximum allowed count.
                // We need to evict the first item it contains to keep the size at the maximum allowed.
                T evicted = BackList[0];
                BackList.RemoveAt (0);

                // Now we need to figure out where to put the evicted value...

                ChunkType<T> oldChunk;
                if (LastOverflowChunkIdentifier != NullChunkIdentifier)
                {
                    // There is an existing overflow chunk immediately preceding the embedded BackList.
                    // Look there to see if we can move the evicted value from BackList into that overflow chunk to make room.
                    oldChunk = LoadChunk (LastOverflowChunkIdentifier, true, baseSerializer);

                    if (oldChunk.Values.Count < MaximumChunkSize)
                    {
                        // We can move the one displaced value from BackList to oldChunk.Values.
                        oldChunk.Values.Add (evicted);
                    }
                    else
                    {
                        // Darn it, there is no room in the final overflow chunk.  We have to create a new chunk.
                        // The new chunk is linked in after the current last chunk and becomes the new tail of the chain.
                        ChunkType<T> newChunk = NewChunk(LastOverflowChunkIdentifier, NullChunkIdentifier);
                        oldChunk.NextChunkIdentifier = newChunk.ExternalIdentifier;
                        LastOverflowChunkIdentifier = newChunk.ExternalIdentifier;
                        newChunk.Values.Add (evicted);
                    }
                }
                else
                {
                    // There are no overflow chunks yet.  See if we can get away with moving some stuff into the FrontList
                    // so as to make room in the BackList...
                    // (With no overflow chunks the sequence is FrontList followed by BackList, so moving the oldest
                    // BackList value to the end of FrontList preserves the order.)
                    if (FrontList.Count < MaximumChunkSize)
                    {
                        // We can move the evicted value from BackList to FrontList.
                        FrontList.Add (evicted);
                    }
                    else
                    {
                        // Darn it, we have to create a new chunk because FrontList is full also.  Oh well...
                        ChunkType<T> newChunk = NewChunk(NullChunkIdentifier, NullChunkIdentifier);
                        FirstOverflowChunkIdentifier = newChunk.ExternalIdentifier;
                        LastOverflowChunkIdentifier  = newChunk.ExternalIdentifier;
                        newChunk.Values.Add (evicted);
                    }
                }

                // Only reached when an eviction occurred; at this point all dirty chunks are in a stable state.
                FlushIfNeeded (logger, wrapper, baseSerializer);    // prevent too many dirty chunks piling up in memory
            }
        }

        /// <summary>
        /// Inserts a value at the front of the ordered sequence.
        /// This method makes no attempt to replace an existing value; 
        /// the value is always prepended even if it already appears in the ordered sequence (scalable property).
        /// The caller is responsible for serializing and storing the scalable property after one or more calls to Prepend.
        /// </summary>
        /// <param name="value">The value to be inserted at the front of the ordered sequence.</param>
        /// <param name="wrapperBase">Wrapper used to obtain the serializer for loading/saving overflow chunks.</param>
        public void Prepend (T value, StrongWrapperBase wrapperBase)
        {
            ISerializerType baseSerializer = wrapperBase.GetSerializer (ChunkConnection.SerializerKind);

            LoggerDelegate logger = baseSerializer.Logger;

            // PrepareWrapper does important sanity checks as a side effect
            StrongWrapper<ChunkType<T>> wrapper = PrepareWrapper (baseSerializer);

            FrontList.Insert (0, value);       // go ahead and insert the value to FrontList.  Then see if FrontList is too big...
            ++Count;

            if (FrontList.Count > MaximumChunkSize)
            {
                // The FrontList is now bigger than the maximum allowed count.
                // We need to evict the last item it contains to keep the size at the maximum allowed.
                T evicted = FrontList[FrontList.Count - 1];
                FrontList.RemoveAt (FrontList.Count - 1);

                // Now we need to figure out where to put the evicted value...

                ChunkType<T> oldChunk;
                if (FirstOverflowChunkIdentifier != NullChunkIdentifier)
                {
                    // There is an existing overflow chunk immediately following the embedded FrontList.
                    // Look there to see if we can move the evicted value from FrontList into that overflow chunk to make room.
                    oldChunk = LoadChunk (FirstOverflowChunkIdentifier, true, baseSerializer);

                    if (oldChunk.Values.Count < MaximumChunkSize)
                    {
                        // We can move the one displaced value from FrontList to oldChunk.Values.
                        oldChunk.Values.Insert (0, evicted);
                    }
                    else
                    {
                        // Darn it, there is no room in the first overflow chunk.  We have to create a new chunk.
                        // The new chunk is linked in before the current first chunk and becomes the new head of the chain.
                        ChunkType<T> newChunk = NewChunk (NullChunkIdentifier, FirstOverflowChunkIdentifier);
                        oldChunk.PrevChunkIdentifier = newChunk.ExternalIdentifier;
                        FirstOverflowChunkIdentifier = newChunk.ExternalIdentifier;
                        newChunk.Values.Add (evicted);
                    }
                }
                else
                {
                    // There are no overflow chunks yet.  See if we can get away with moving some stuff into the BackList
                    // so as to make room in the FrontList...
                    if (BackList.Count < MaximumChunkSize)
                    {
                        // We can move the evicted value from FrontList to the front of BackList.
                        // (With no overflow chunks the sequence is FrontList followed by BackList, so this preserves the order.)
                        BackList.Insert (0, evicted);
                    }
                    else
                    {
                        // Darn it, we have to create a new chunk because BackList is full also.  Oh well...
                        ChunkType<T> newChunk = NewChunk (NullChunkIdentifier, NullChunkIdentifier);
                        FirstOverflowChunkIdentifier = newChunk.ExternalIdentifier;
                        LastOverflowChunkIdentifier = newChunk.ExternalIdentifier;
                        newChunk.Values.Add (evicted);
                    }
                }

                // Only reached when an eviction occurred; at this point all dirty chunks are in a stable state.
                FlushIfNeeded (logger, wrapper, baseSerializer);    // prevent too many dirty chunks piling up in memory
            }
        }


        /// <summary>
        /// Lazily enumerates a contiguous subsequence of the contained values, moving forward or backward.
        /// </summary>
        /// <param name="direction">EnumerationDirectionEnum.Forward to walk front-to-back,
        /// EnumerationDirectionEnum.Backward to walk back-to-front.</param>
        /// <param name="numberToSkip">How many values to pass over before yielding begins.</param>
        /// <param name="maximumNumberToEnumerate">Upper bound on the number of values yielded; use long.MaxValue for no limit.</param>
        /// <param name="wrapperBase">Wrapper used to obtain the serializer that loads overflow chunks.</param>
        /// <returns>An IEnumerable that lazy-iterates the requested subsequence; callers may break out early to avoid needless I/O.</returns>
        public IEnumerable<T> OrderedSubsequence(EnumerationDirectionEnum direction, long numberToSkip, long maximumNumberToEnumerate, StrongWrapperBase wrapperBase)
        {
            IEnumerable<T> source;
            if (direction == EnumerationDirectionEnum.Forward)
            {
                source = ForwardEnumeration (wrapperBase);
            }
            else if (direction == EnumerationDirectionEnum.Backward)
            {
                source = BackwardEnumeration (wrapperBase);
            }
            else
            {
                throw new ArgumentException (string.Format ("Unsupported enumeration direction '{0}'", direction));
            }

            long remainingToSkip = numberToSkip;
            long remainingToYield = maximumNumberToEnumerate;

            foreach (T value in source)
            {
                if (remainingToSkip > 0)
                {
                    --remainingToSkip;
                    continue;
                }

                // Stop enumerating (and thus stop any chunk I/O) as soon as the quota is exhausted.
                if (remainingToYield <= 0)
                {
                    yield break;
                }

                yield return value;
                --remainingToYield;
            }
        }

        /// <summary>
        /// Lazily yields every value in the sequence from front to back:
        /// the embedded FrontList first, then each overflow chunk following the chain of
        /// NextChunkIdentifier links, and finally the embedded BackList.
        /// </summary>
        /// <param name="wrapperBase">Wrapper used to obtain the serializer that loads overflow chunks.</param>
        /// <returns>An IEnumerable over all values in forward order.</returns>
        public IEnumerable<T> ForwardEnumeration (StrongWrapperBase wrapperBase)
        {
            ISerializerType serializer = wrapperBase.GetSerializer (ChunkConnection.SerializerKind);

            // The head of the sequence is embedded directly in this object...
            foreach (T frontValue in FrontList)
            {
                yield return frontValue;
            }

            // ...then walk the overflow chunk chain front-to-back...
            for (string chunkId = FirstOverflowChunkIdentifier; chunkId != NullChunkIdentifier; )
            {
                ChunkType<T> chunk = LoadChunk(chunkId, false, serializer);
                foreach (T chunkValue in chunk.Values)
                {
                    yield return chunkValue;
                }
                chunkId = chunk.NextChunkIdentifier;
            }

            // ...and the tail of the sequence is embedded here as well.
            foreach (T backValue in BackList)
            {
                yield return backValue;
            }
        }

        /// <summary>
        /// Lazily yields every value in the sequence from back to front:
        /// the embedded BackList first (last value outward), then each overflow chunk following
        /// the chain of PrevChunkIdentifier links, and finally the embedded FrontList.
        /// </summary>
        /// <param name="wrapperBase">Wrapper used to obtain the serializer that loads overflow chunks.</param>
        /// <returns>An IEnumerable over all values in reverse order.</returns>
        public IEnumerable<T> BackwardEnumeration(StrongWrapperBase wrapperBase)
        {
            ISerializerType serializer = wrapperBase.GetSerializer (ChunkConnection.SerializerKind);

            // The tail of the sequence is embedded directly in this object...
            int index = BackList.Count - 1;
            while (index >= 0)
            {
                yield return BackList[index];
                --index;
            }

            // ...then walk the overflow chunk chain back-to-front...
            string chunkId = LastOverflowChunkIdentifier;
            while (chunkId != NullChunkIdentifier)
            {
                ChunkType<T> chunk = LoadChunk(chunkId, false, serializer);
                index = chunk.Values.Count - 1;
                while (index >= 0)
                {
                    yield return chunk.Values[index];
                    --index;
                }
                chunkId = chunk.PrevChunkIdentifier;
            }

            // ...and the head of the sequence is embedded here as well.
            index = FrontList.Count - 1;
            while (index >= 0)
            {
                yield return FrontList[index];
                --index;
            }
        }

        /// <summary>
        /// Deletes all values from the ordered sequence, leaving it empty and releasing all storage allocated to it.
        /// An example of proper use is when the containing object is about to be purged, this function should be called
        /// to avoid orphaning its overflow chunks.
        /// </summary>
        /// <param name="wrapperBase">Wrapper used to obtain the serializer and to purge the overflow chunks.</param>
        public void Clear(StrongWrapperBase wrapperBase)
        {
            // !!! We need to consider thread-safety here.  What if another thread is enumerating/inserting/etc at the same time?

            ISerializerType baseSerializer = wrapperBase.GetSerializer (ChunkConnection.SerializerKind);

            // PrepareWrapper does important sanity checks as a side effect
            StrongWrapper<ChunkType<T>> wrapper = PrepareWrapper(baseSerializer);
            string id = FirstOverflowChunkIdentifier;   // remember the head of the chunk chain before the links are reset below

            // Reset the embedded state first...
            FrontList.Clear ();
            BackList.Clear ();
            Count = 0;
            FirstOverflowChunkIdentifier = NullChunkIdentifier;
            LastOverflowChunkIdentifier = NullChunkIdentifier;

            // ...then walk the (remembered) chunk chain and purge each chunk from the store.
            while (id != NullChunkIdentifier)
            {
                ChunkType<T> chunk = LoadChunk(id, false, baseSerializer);
                wrapper.PurgeItems (new[] { ExternalIDType.ExternalIDFromEscapedString( id ) });
                id = chunk.NextChunkIdentifier;
            }

            _dirtyChunks.Clear ();   // do this *last*, so that we can use in-memory modified objects (prev/next links may have been updated since the last save)
        }

        /// <summary>
        /// Forces all pending (dirty) overflow chunks out to the underlying store.
        /// The serializer must call this in order for state changes to be fully committed to storage.
        /// </summary>
        /// <param name="baseSerializer">Serializer used to save the dirty chunks.</param>
        public void Flush(ISerializerType baseSerializer)
        {
            LoggerDelegate log = baseSerializer.Logger;
            StrongWrapper<ChunkType<T>> chunkWrapper = PrepareWrapper(baseSerializer);

            // _dirtyChunks doubles as the flush lock; InternalFlush assumes the caller holds it.
            lock (_dirtyChunks)
            {
                InternalFlush (log, chunkWrapper, baseSerializer);
            }
        }

        /// <summary>
        /// Flushes the dirty chunks, but only once their count has reached DirtyChunkThreshold,
        /// so that memory usage stays bounded without flushing on every mutation.
        /// Call this only at "checkpoints" where every dirty chunk is in a stable, fully-updated state:
        /// flushing a chunk that is still mid-modification could lose the remaining changes.
        /// </summary>
        /// <param name="logger">logger delegate</param>
        /// <param name="wrapper">strong wrapper for Type</param>
        /// <param name="baseSerializer">base serializer</param>
        private void FlushIfNeeded (LoggerDelegate logger, StrongWrapper<ChunkType<T>> wrapper, ISerializerType baseSerializer)
        {
            lock (_dirtyChunks)
            {
                bool tooManyDirtyChunks = _dirtyChunks.Count >= DirtyChunkThreshold;
                if (tooManyDirtyChunks)
                {
                    InternalFlush (logger, wrapper, baseSerializer);
                }
            }
        }

        /// <summary>
        /// Convenience overload: resolves the logger and chunk wrapper from the serializer, then
        /// flushes the dirty chunks if (and only if) their count has reached DirtyChunkThreshold.
        /// Call this only at "checkpoints" where every dirty chunk is in a stable, fully-updated state:
        /// flushing a chunk that is still mid-modification could lose the remaining changes.
        /// </summary>
        /// <param name="baseSerializer">base serializer</param>
        private void FlushIfNeeded (ISerializerType baseSerializer)
        {
            LoggerDelegate log = baseSerializer.Logger;
            StrongWrapper<ChunkType<T>> chunkWrapper = PrepareWrapper (baseSerializer);
            FlushIfNeeded (log, chunkWrapper, baseSerializer);
        }

        /// <summary>
        /// Merges sparse chunks, saves whatever is still dirty, and purges merged-away chunks.
        /// Precondition: the caller already holds lock(_dirtyChunks).
        /// </summary>
        /// <param name="logger">logger delegate</param>
        /// <param name="wrapper">strong wrapper used to save/purge chunks</param>
        /// <param name="baseSerializer">base serializer</param>
        private void InternalFlush (LoggerDelegate logger, StrongWrapper<ChunkType<T>> wrapper, ISerializerType baseSerializer)
        {
            if (_dirtyChunks.Count == 0)
            {
                return;     // nothing pending
            }

            // Consolidate sparse chunks first; this may dirty neighbours and/or schedule chunks for deletion.
            List<ExternalIDType> purgeIdentifiers = MergeSparseChunksTogether (baseSerializer);

            // Merging may have folded every dirty chunk back into the embedded lists...
            if (_dirtyChunks.Count > 0)
            {
                wrapper.SaveItems (_dirtyChunks.Values, logger);
                _dirtyChunks.Clear ();      // release memory once the items have been saved successfully
            }

            if (purgeIdentifiers.Count > 0)
            {
                wrapper.PurgeItems (purgeIdentifiers);
            }
        }

        /// <summary>
        /// Scans the currently-dirty chunks and merges any sufficiently sparse chunk into an adjacent
        /// chunk or into the embedded FrontList/BackList, fixing up prev/next links as it goes.
        /// Assumes the caller holds lock(_dirtyChunks) (see InternalFlush).
        /// </summary>
        /// <param name="baseSerializer">Serializer used to load adjacent chunks.</param>
        /// <returns>The external identifiers of chunks that were merged away and must now be purged from the store.</returns>
        private List<ExternalIDType> MergeSparseChunksTogether (ISerializerType baseSerializer)
        {
            // Before saving the dirty chunks, look to see if any of them are sparse enough be merged with an adjacent chunk.
            // This prevents long-term buildup of extremely sparse chunks, which would cancel out the benefits of having chunks in the first place.

            List<ChunkType<T>> chunksToBePurged = new List<ChunkType<T>> ();

            int mergeThreshold = MaximumChunkSize / 2;      // it is OK if MaximumChunkSize==1 and mergeThreshold==0 (I thought of that!)

            // We need to iterate over all chunks that are currently marked dirty, but we also need to mark
            // new chunks as dirty inside that same loop.
            // Avoid iterating over the same collection that gets changed inside the loop...
            // NOTE(review): a chunk merged away by an earlier iteration stays in this snapshot and keeps its
            // links until the invalidation pass below; confirm that re-visiting such a chunk cannot merge it twice.
            List<ChunkType<T>> chunksThatWereAlreadyDirty = new List<ChunkType<T>> (_dirtyChunks.Values);
            foreach (ChunkType<T> chunk in chunksThatWereAlreadyDirty)
            {
                if (chunk.Values.Count <= mergeThreshold)
                {
                    // If we are going to merge, we will have to dirty *both* adjacent chunks in order to fix up the prev/next links.
                    // Special case: this may be the first and/or last chunk, in which case we have to fix up links in the scalable property itself.

                    if (chunk.NextChunkIdentifier == NullChunkIdentifier)
                    {
                        // This is the last chunk in the chain.
                        if (chunk.PrevChunkIdentifier == NullChunkIdentifier)
                        {
                            // This is the lone chunk.  See if we can merge into the FrontList or BackList.
                            if (FrontList.Count + chunk.Values.Count <= MaximumChunkSize)
                            {
                                // Both links are null here, so the chain becomes empty.
                                FirstOverflowChunkIdentifier = chunk.NextChunkIdentifier;
                                LastOverflowChunkIdentifier = chunk.PrevChunkIdentifier;
                                chunksToBePurged.Add (chunk);
                                FrontList.AddRange (chunk.Values);
                            }
                            else if (BackList.Count + chunk.Values.Count <= MaximumChunkSize)
                            {
                                FirstOverflowChunkIdentifier = chunk.NextChunkIdentifier;
                                LastOverflowChunkIdentifier = chunk.PrevChunkIdentifier;
                                chunksToBePurged.Add (chunk);
                                // Prepend: the chunk's values come before the current BackList contents,
                                // so append BackList onto the chunk's list and adopt that list as the new BackList.
                                chunk.Values.AddRange (BackList);
                                BackList = chunk.Values;
                            }
                        }
                        else
                        {
                            // This is the last chunk, but there is at least one chunk before this one.
                            ChunkType<T> prev = LoadChunk (chunk.PrevChunkIdentifier, false, baseSerializer);

                            // We prefer to merge into the BackList if possible...
                            if (BackList.Count + chunk.Values.Count <= MaximumChunkSize)
                            {
                                LastOverflowChunkIdentifier = chunk.PrevChunkIdentifier;
                                MarkAsDirty (prev);
                                prev.NextChunkIdentifier = chunk.NextChunkIdentifier;
                                chunksToBePurged.Add (chunk);
                                chunk.Values.AddRange (BackList);
                                BackList = chunk.Values;
                            }
                            else if (prev.Values.Count + chunk.Values.Count <= MaximumChunkSize)
                            {
                                // We can't merge into the BackList, but we can merge into the penultimate chunk (prev)...
                                LastOverflowChunkIdentifier = chunk.PrevChunkIdentifier;
                                MarkAsDirty (prev);
                                prev.NextChunkIdentifier = chunk.NextChunkIdentifier;
                                chunksToBePurged.Add (chunk);
                                prev.Values.AddRange (chunk.Values);
                            }
                        }
                    }
                    else
                    {
                        // There is at least one chunk after this dirty chunk.
                        ChunkType<T> next = LoadChunk (chunk.NextChunkIdentifier, false, baseSerializer);
                        if (chunk.PrevChunkIdentifier == NullChunkIdentifier)
                        {
                            // This is the first chunk, but there is at least one chunk after this one.
                            // We prefer to merge into the FrontList if possible...
                            if (FrontList.Count + chunk.Values.Count <= MaximumChunkSize)
                            {
                                FirstOverflowChunkIdentifier = chunk.NextChunkIdentifier;
                                MarkAsDirty (next);
                                next.PrevChunkIdentifier = chunk.PrevChunkIdentifier;
                                chunksToBePurged.Add (chunk);
                                FrontList.AddRange (chunk.Values);
                            }
                            else if (next.Values.Count + chunk.Values.Count <= MaximumChunkSize)
                            {
                                // We can't merge into the FrontList, but we can merge into the second chunk (next)...
                                FirstOverflowChunkIdentifier = chunk.NextChunkIdentifier;
                                MarkAsDirty (next);
                                next.PrevChunkIdentifier = chunk.PrevChunkIdentifier;
                                chunksToBePurged.Add (chunk);
                                // Prepend: build the combined list on the doomed chunk, then hand it to next.
                                chunk.Values.AddRange (next.Values);
                                next.Values = chunk.Values;
                            }
                        }
                        else
                        {
                            // This chunk is sandwiched between two other chunks.
                            // Pick the emptier of the adjacent chunks (prev or next) as the potential merge target.
                            // If exactly one of the adjacent chunks has a short enough list to allow a merger,
                            // it logically follows that it will be the emptier of the two.
                            // Furthermore, if both are short enough, it is better to pick the shorter one
                            // to minimize sparseness over time.
                            ChunkType<T> prev = LoadChunk (chunk.PrevChunkIdentifier, false, baseSerializer);
                            if (prev.Values.Count < next.Values.Count)
                            {
                                if (prev.Values.Count + chunk.Values.Count <= MaximumChunkSize)
                                {
                                    // The chunk before is emptier than the chunk after, so merge into the chunk before.
                                    MarkAsDirty (prev);
                                    MarkAsDirty (next);
                                    prev.NextChunkIdentifier = chunk.NextChunkIdentifier;
                                    next.PrevChunkIdentifier = chunk.PrevChunkIdentifier;
                                    chunksToBePurged.Add (chunk);
                                    prev.Values.AddRange (chunk.Values);
                                }
                            }
                            else
                            {
                                if (next.Values.Count + chunk.Values.Count <= MaximumChunkSize)
                                {
                                    // Merge into the chunk after, because it is the same size or emptier.
                                    MarkAsDirty (prev);
                                    MarkAsDirty (next);
                                    prev.NextChunkIdentifier = chunk.NextChunkIdentifier;
                                    next.PrevChunkIdentifier = chunk.PrevChunkIdentifier;
                                    chunksToBePurged.Add (chunk);
                                    // Prepend: build the combined list on the doomed chunk, then hand it to next.
                                    chunk.Values.AddRange (next.Values);
                                    next.Values = chunk.Values;
                                }
                            }
                        }
                    }
                }
            }

            List<ExternalIDType> identifiersOfChunksToBePurged = new List<ExternalIDType> ();
            foreach (ChunkType<T> chunk in chunksToBePurged)
            {
                identifiersOfChunksToBePurged.Add (new ExternalIDType (chunk.ExternalIdentifier));

                // Whichever chunks are about to be purged should no longer be considered dirty:
                // we don't want to save those chunks and delete them... we want to just delete them!
                _dirtyChunks.Remove (chunk.ExternalIdentifier);

                // Just to be a little paranoid, make sure all the chunks to be deleted are in an invalid state,
                // so as to catch any attempt to access them later.  The intent is to catch bugs in methods like
                // RemoveAllMatching, where I am worried about iterating through the same chunk list that we
                // are doing surgery on here.
                // (Note: nulling chunk.Values only drops this chunk's reference; a surviving chunk that adopted
                // the same list via "next.Values = chunk.Values" above still holds it.)
                chunk.NextChunkIdentifier = null;
                chunk.PrevChunkIdentifier = null;
                chunk.ExternalIdentifier = null;
                chunk.Values = null;
            }

            return identifiersOfChunksToBePurged;
        }

        /// <summary>
        /// Scans the ordered sequence front-to-back, handing each value to the filter delegate in that order.
        /// The first value for which the delegate returns true is removed, and the method returns true.
        /// If the delegate never returns true (or the sequence is empty), nothing is removed and false is returned.
        /// The caller must re-write the scalable property to the store after a successful removal;
        /// multiple removals may be batched before a single re-write for efficiency.
        /// </summary>
        /// <param name="wrapperBase">wrapper base</param>
        /// <param name="filter">A delegate which accepts a value and returns true if that value should be deleted.</param>
        /// <returns>True if exactly 1 value was deleted, false if 0 values were deleted.</returns>
        public bool RemoveFirst (StrongWrapperBase wrapperBase, FilterDelegate filter)
        {
            ISerializerType serializer = wrapperBase.GetSerializer (ChunkConnection.SerializerKind);

            // The in-memory front list holds the earliest values, so search it first.
            if (RemoveFirstFromList (filter, FrontList))
            {
                return true;
            }

            // Walk the overflow chunks in forward (front-to-back) order.
            for (string id = FirstOverflowChunkIdentifier; id != NullChunkIdentifier; )
            {
                ChunkType<T> chunk = LoadChunk (id, false, serializer);
                if (RemoveFirstFromList (filter, chunk.Values))
                {
                    // The chunk shrank, so it must be re-written (and possibly merged) later.
                    MarkAsDirty (chunk);
                    FlushIfNeeded (serializer);
                    return true;
                }

                id = chunk.NextChunkIdentifier;
            }

            // Nothing matched anywhere else; the in-memory back list is the final candidate.
            return RemoveFirstFromList (filter, BackList);
        }

        /// <summary>
        /// Scans the ordered sequence back-to-front, handing each value to the filter delegate in that order.
        /// The first value for which the delegate returns true is removed, and the method returns true.
        /// If the delegate never returns true (or the sequence is empty), nothing is removed and false is returned.
        /// The caller must re-write the scalable property to the store after a successful removal;
        /// multiple removals may be batched before a single re-write for efficiency.
        /// </summary>
        /// <param name="wrapperBase">wrapper base</param>
        /// <param name="filter">A delegate which accepts a value and returns true if that value should be deleted.</param>
        /// <returns>True if exactly 1 value was deleted, false if 0 values were deleted.</returns>
        public bool RemoveLast (StrongWrapperBase wrapperBase, FilterDelegate filter)
        {
            ISerializerType serializer = wrapperBase.GetSerializer (ChunkConnection.SerializerKind);

            // The in-memory back list holds the latest values, so search it first.
            if (RemoveLastFromList (filter, BackList))
            {
                return true;
            }

            // Walk the overflow chunks in reverse (back-to-front) order.
            for (string id = LastOverflowChunkIdentifier; id != NullChunkIdentifier; )
            {
                ChunkType<T> chunk = LoadChunk (id, false, serializer);
                if (RemoveLastFromList (filter, chunk.Values))
                {
                    // The chunk shrank, so it must be re-written (and possibly merged) later.
                    MarkAsDirty (chunk);
                    FlushIfNeeded (serializer);
                    return true;
                }

                id = chunk.PrevChunkIdentifier;
            }

            // Nothing matched anywhere else; the in-memory front list is the final candidate.
            return RemoveLastFromList (filter, FrontList);
        }


        /// <summary>
        /// This method searches the entire sequence for all values for which the filter delegate returns true.
        /// All such values are deleted from the sequence.
        /// Warning: this can be very expensive if there are millions of chunks!
        /// It is far better to use RemoveFirst or RemoveLast if you are pretty sure there is only a single value to be deleted.
        /// However, RemoveAllMatching is more efficient if you have to delete a lot of values, as it will be O(N), 
        /// instead of the O(N^2) behavior if you call RemoveFirst/RemoveLast in a loop.
        /// The caller is responsible for re-saving the scalable property to the store if this method returns a nonzero value.
        /// The caller is allowed to call this method and/or RemoveFirst and/or RemoveLast multiple times before saving, for increased efficiency.
        /// </summary>
        /// <param name="wrapperBase">wrapper base</param>
        /// <param name="filter">A delegate which accepts a value and returns true if that value should be deleted.
        /// The caller must not expect the delegate to be called with values in any particular order.
        /// Also, it is possible for the same value to be passed to the delegate more than one time.</param>
        /// <returns>The total number of values that were deleted.</returns>
        public long RemoveAllMatching (StrongWrapperBase wrapperBase, FilterDelegate filter)
        {
            ISerializerType baseSerializer = wrapperBase.GetSerializer (ChunkConnection.SerializerKind);

            // We remove from the FrontList and BackList first, so as to make room for merging into these preferred areas...
            long numberOfValuesRemoved = RemoveAllMatchingFromList (filter, FrontList);
            numberOfValuesRemoved += RemoveAllMatchingFromList (filter, BackList);

            // Weird detail: we have to completely Flush first to make sure the only dirty chunks
            // are the ones "behind" us when we iterate.  Otherwise the iteration could fail.
            Flush (baseSerializer);

            // Walk the overflow chunks front-to-back, pruning each chunk's value list in place.
            string id = FirstOverflowChunkIdentifier;
            while (id != NullChunkIdentifier)
            {
                ChunkType<T> chunk = LoadChunk (id, false, baseSerializer);
                id = chunk.NextChunkIdentifier;     // pre-fetch the next chunk's id, because current chunk might get deleted

                long numDeletedInChunk = RemoveAllMatchingFromList (filter, chunk.Values);
                if (numDeletedInChunk > 0)
                {
                    numberOfValuesRemoved += numDeletedInChunk;

                    // The chunk shrank; it must be re-written (or merged/purged) on the next flush.
                    MarkAsDirty (chunk);

                    // This call to FlushIfNeeded is very scary, because we keep iterating after 
                    // a possible set of chunk merges, which changes the doubly-linked list we are iterating over.
                    // I have taken care to make this safe by 
                    //     (1) fully flushing before the loop and 
                    //     (2) pre-fetching the ID of the next chunk.
                    // The idea is that the current chunk, or any chunks before it, can be deleted,
                    // but no chunk after the current chunk will be deleted, so the next ID will still be valid.
                    // This relies on a quirk of the merge algorithm: it never deletes a chunk that wasn't 
                    // already in the dirty set.
                    // If we delete the current chunk and move its items into the next chunk,
                    // we assume that calling the delegate again for these items is OK.
                    FlushIfNeeded (baseSerializer);
                }
            }

            return numberOfValuesRemoved;
        }


        /// <summary>
        /// Removes every value in the given list for which the filter returns true,
        /// decrementing Count accordingly.
        /// </summary>
        /// <param name="filter">A delegate which accepts a value and returns true if that value should be deleted.</param>
        /// <param name="list">The list to prune in place.</param>
        /// <returns>The number of values that were removed.</returns>
        private long RemoveAllMatchingFromList (FilterDelegate filter, List<T> list)
        {
            // List<T>.RemoveAll tests each element exactly once, in order, and compacts the
            // list in a single O(n) pass.  The previous implementation called RemoveAt inside
            // a loop, which was accidentally O(n^2) because every removal shifted the tail.
            int numberOfValuesRemoved = list.RemoveAll (value => filter (value));
            Count -= numberOfValuesRemoved;
            return numberOfValuesRemoved;
        }


        /// <summary>
        /// Removes the first value in the list (searching forward) for which the filter returns true.
        /// </summary>
        /// <param name="filter">A delegate which accepts a value and returns true if that value should be deleted.</param>
        /// <param name="list">The list to search and modify.</param>
        /// <returns>True if a value was removed, false otherwise.</returns>
        private bool RemoveFirstFromList (FilterDelegate filter, List<T> list)
        {
            // FindIndex calls the predicate forward from the front and stops at the first match,
            // which matches the search order this method promises.
            int match = list.FindIndex (value => filter (value));
            if (match < 0)
            {
                return false;
            }

            list.RemoveAt (match);
            --Count;
            return true;
        }


        /// <summary>
        /// Removes the last value in the list (searching backward from the end) for which the filter returns true.
        /// </summary>
        /// <param name="filter">A delegate which accepts a value and returns true if that value should be deleted.</param>
        /// <param name="list">The list to search and modify.</param>
        /// <returns>True if a value was removed, false otherwise.</returns>
        private bool RemoveLastFromList (FilterDelegate filter, List<T> list)
        {
            // FindLastIndex calls the predicate backward from the end and stops at the first match,
            // which matches the search order this method promises.
            int match = list.FindLastIndex (value => filter (value));
            if (match < 0)
            {
                return false;
            }

            list.RemoveAt (match);
            --Count;
            return true;
        }


        /// <summary>
        /// Returns the wrapper used to read and write overflow chunks for this property's collection,
        /// creating and caching it on first use.  The wrapper cache is locked for the duration of the
        /// lookup/creation, so concurrent callers receive the same instance.
        /// </summary>
        /// <param name="serializer">The base serializer from which the per-chunk serializer is derived.</param>
        /// <returns>The cached wrapper for CollectionName.</returns>
        private StrongWrapper<ChunkType<T>> PrepareWrapper (ISerializerType serializer)
        {
            SanityCheck ();
            lock (ChunkWrapper)
            {
                StrongWrapper<ChunkType<T>> wrapper;

                // TryGetValue avoids the double lookup (ContainsKey followed by the indexer)
                // that the previous implementation performed on every call.
                if (!ChunkWrapper.TryGetValue (CollectionName, out wrapper))
                {
                    ISerializerType chunkSerializer = 
                        new Serializer.WrapperSerializerType(
                            typeof(T), 
                            typeof(ChunkType<>),  
                            serializer);
                    wrapper = 
                        new StrongWrapper<ChunkType<T>>(
                            new CIP4ItemAttribute(CollectionName, true), 
                            ChunkConnection, 
                            serializer.Logger);
                    wrapper.SetSerializer (chunkSerializer, ChunkConnection.SerializerKind);
                    wrapper.GetWrapperCollection().Name = wrapper.ItemAttribute.Collection;
                    ChunkWrapper.Add(CollectionName, wrapper);
                }

                return wrapper;
            }
        }

        /// <summary>
        /// Records the chunk in the dirty set so it gets written back on the next flush.
        /// Idempotent: a chunk already marked dirty is left untouched.
        /// </summary>
        /// <param name="chunk">The chunk that has pending in-memory changes.</param>
        private void MarkAsDirty (ChunkType<T> chunk)
        {
            lock (_dirtyChunks)
            {
                if (_dirtyChunks.ContainsKey (chunk.ExternalIdentifier))
                {
                    return;     // already tracked; nothing to do
                }

                _dirtyChunks.Add (chunk.ExternalIdentifier, chunk);
            }
        }

        /// <summary>
        /// Creates a fresh, empty overflow chunk linked between the given neighbors,
        /// assigns it a new random identifier, and marks it dirty so it will be persisted.
        /// </summary>
        /// <param name="prevID">Identifier of the preceding chunk.</param>
        /// <param name="nextID">Identifier of the following chunk.</param>
        /// <returns>The newly created chunk.</returns>
        private ChunkType<T> NewChunk(string prevID, string nextID)
        {
            ChunkType<T> chunk = new ChunkType<T>
            {
                ExternalIdentifier = Guid.NewGuid().ToString("N"),    // e.g. "3ce56dab99fb4d409b8aae8541f97a53"
                PrevChunkIdentifier = prevID,
                NextChunkIdentifier = nextID,
                Values = new List<T> ()
            };

            // Ensure the brand-new chunk is written out the next time we serialize.
            MarkAsDirty (chunk);

            return chunk;
        }

        /// <summary>
        /// Loads the overflow chunk with the given identifier, preferring the in-memory dirty set
        /// over the underlying store so that pending changes are never shadowed by a stale copy.
        /// </summary>
        /// <param name="id">External identifier of the chunk to load.  Must be a non-null, non-sentinel identifier.</param>
        /// <param name="markAsDirty">True if the caller will definitely modify the chunk, so it should enter the dirty set now.</param>
        /// <param name="serializer">Serializer used to prepare the wrapper for store access.</param>
        /// <returns>The requested chunk; never null.</returns>
        /// <exception cref="ArgumentNullException">The identifier is null.</exception>
        /// <exception cref="ArgumentException">The identifier is the null-chunk sentinel, or no chunk with that identifier exists.</exception>
        /// <exception cref="ApplicationException">The store returned a chunk with a different identifier than requested.</exception>
        private ChunkType<T> LoadChunk(string id, bool markAsDirty, ISerializerType serializer)
        {
            LoggerDelegate logger = serializer.Logger;

            if (id == null)
            {
                // A bad argument should be reported as such; throwing NullReferenceException
                // directly (as the previous version did) masquerades as a runtime fault.
                // ArgumentNullException derives from ArgumentException, which this method
                // already throws, so callers' catch clauses remain compatible.
                throw new ArgumentNullException ("id", "Chunk identifier must not be null.");
            }
            if (id == NullChunkIdentifier)
            {
                // This is a logic error, because null/empty string is analogous to a null object reference as a link.
                // NOTE(review): NullChunkIdentifier is currently declared as null, which makes this branch
                // unreachable (the null check above throws first).  It only becomes effective if the
                // sentinel is changed to the empty string — confirm the intended sentinel value.
                throw new ArgumentException ("Attempt to load an overflow chunk using a null/empty identifier.");
            }
            lock (_dirtyChunks)
            {
                // Under no circumstance do we want to re-load a chunk from storage if we have already loaded
                // it and there are pending changes waiting to be written back to storage.
                // Doing that would cause inconsistent versions of the same chunk to exist in memory.
                // So we look in the DirtyChunks dictionary to see if the desired chunk is sitting in memory, waiting to be flushed.
                // TryGetValue avoids the double lookup that ContainsKey + the indexer performed.

                ChunkType<T> cached;
                if (_dirtyChunks.TryGetValue (id, out cached))
                {
                    return cached;
                }

                // We have to go to the underlying store to retrieve the chunk.
                StrongWrapper<ChunkType<T>> wrapper = PrepareWrapper(serializer);
                ChunkType<T> chunk = wrapper.GetViaExternalID(ExternalIDType.ExternalIDFromEscapedString(id), logger);
                if (chunk == null)
                {
                    throw new ArgumentException (string.Format ("Could not find overflow chunk whose external identifier is {0}", id));
                }
                if (chunk.ExternalIdentifier != id)
                {
                    throw new ApplicationException (string.Format ("Internal error: we asked for {0} but we got back {1}", id, chunk.ExternalIdentifier));
                }

                if (markAsDirty)
                {
                    // The caller is telling us that he is definitely going to make changes to this chunk.
                    // Since we already have _dirtyChunks locked, this is a thread-safe time to mark the chunk as dirty.
                    _dirtyChunks.Add (id, chunk);
                }

                return chunk;
            }
        }

        // Sentinel that terminates the doubly-linked overflow chunk list: the loops in
        // RemoveFirst/RemoveLast/RemoveAllMatching stop when a link equals this value.
        // NOTE(review): because this is declared as null, the "id == NullChunkIdentifier"
        // guard in LoadChunk is unreachable (its preceding "id == null" check throws first),
        // yet that guard's message speaks of a "null/empty identifier".  This suggests the
        // sentinel may have been intended to be "" — confirm before changing, since the
        // loop-termination behavior depends on it.
        private const string NullChunkIdentifier = null;

        /// <summary>
        /// Verifies that the property's configuration and in-memory lists are in a usable state,
        /// throwing if any invariant is violated.  Called before store access (see PrepareWrapper).
        /// </summary>
        private void SanityCheck ()
        {
            // NOTE(review): the message says "positive" but the check permits 0 —
            // confirm whether a MaximumChunkSize of 0 is actually legal before tightening to <= 0.
            if (MaximumChunkSize < 0)
            {
                throw new ApplicationException ("MaximumChunkSize must be a positive integer.");
            }

            // NOTE(review): same message/check mismatch as above — 0 currently passes validation.
            if (DirtyChunkThreshold < 0)
            {
                throw new ApplicationException ("DirtyChunkThreshold must be a positive integer.");
            }

            if (string.IsNullOrEmpty (CollectionName))
            {
                throw new ApplicationException ("CollectionName was not set to a valid value.");
            }

            // NOTE(review): throwing NullReferenceException directly is an anti-pattern
            // (InvalidOperationException would describe a bad object state better), but the
            // exception type is kept here in case callers depend on it.
            if (FrontList == null)
            {
                throw new NullReferenceException ("FrontList is null.");
            }

            if (BackList == null)
            {
                throw new NullReferenceException ("BackList is null.");
            }
        }
    }
}
