﻿using System;
using System.Collections.Concurrent;
using System.Diagnostics.Contracts;
using System.Runtime.CompilerServices;
using System.Threading;
using System.Threading.Tasks;

namespace O1.Kernel.IO.Storage
{
    [ContractVerification(true)]
    internal abstract class AbstractFixedStore<T> : IDisposable
    {
        /// <summary>
        /// Base class for a fixed-record-size, versioned (MVCC-style) store. DML commands are
        /// queued and applied by a single background worker; each record buffer starts with an
        /// Int64 header holding the owning transaction id (with the operation embedded), followed
        /// by one or more versions of the record, newest first.
        /// </summary>
        /// <remarks>
        /// NOTE(review): the exact layout produced by <c>ExpandAndShift</c> (new array, old
        /// content shifted right by one record) is assumed from usage here — confirm against
        /// its implementation.
        /// </remarks>

        // Slack added when the backing array must grow, to amortize expansion cost.
        private const int PaddingSize = 1024;

        // Single-consumer DML command queue, drained by the worker started in InitializeWorker.
        private readonly BlockingCollection<StorageTask> commandQueue = new BlockingCollection<StorageTask>();

        private readonly CancellationTokenSource workerCancellation;

        // One byte[] per record id; null slot means the id has never been written.
        private readonly BlockArray<byte[]> storage;

        // Id generator; durable so restart resumes above the highest issued id.
        private readonly SequenceCache32 counter;

        // Ids freed by deletes, recycled by PendAdd before new ids are drawn.
        private readonly ConcurrentQueue<int> deleted;

        /// <summary>
        /// Initializes the store over an existing backing array.
        /// </summary>
        /// <param name="storage">Backing block array of record buffers.</param>
        /// <param name="maxValue">Highest id already in use; the counter starts above it.</param>
        /// <param name="deleted">Previously freed ids available for reuse.</param>
        protected AbstractFixedStore(BlockArray<byte[]> storage, int maxValue, int[] deleted)
        {
            Contract.Requires(storage != null);
            Contract.Requires(deleted != null);
            Contract.Requires(maxValue > -1);

            this.storage = storage;

            // Synchronized blocks of 100 are fine. In the event of a failure, the restoration process
            // will 'know' the highest id and will re-initialize the counter to a nearly contiguous value.
            this.counter = new SequenceCache32(new DurableCounter(maxValue), 100);
            this.deleted = new ConcurrentQueue<int>(deleted);
            this.workerCancellation = this.InitializeWorker();
        }

        /// <summary>Size in bytes of one record version, including its Int64 header.</summary>
        protected abstract int RecordSize { get; }

        public void Dispose()
        {
            this.Dispose(true);
            GC.SuppressFinalize(this);
        }

        /// <summary>Serializes <paramref name="item"/> into <paramref name="buffer"/> at the given offset.</summary>
        protected abstract void WriteItem(T item, byte[] buffer, int recordOffset);

        /// <summary>Deserializes an item from <paramref name="buffer"/> at the given offset.</summary>
        protected abstract T ReadItem(byte[] buffer, int recordOffset);

        /// <summary>
        /// Enqueues tasks for the background worker, preserving argument order.
        /// </summary>
        protected void EnqueueTasks(params StorageTask[] tasks)
        {
            Contract.Requires(tasks != null);

            // TODO: should this be synchronized as a unit? Two concurrent callers may
            // currently interleave their tasks in the queue.
            foreach (var task in tasks)
            {
                this.commandQueue.Add(task);
            }
        }

        /// <summary>
        /// Attempts to read the newest version of record <paramref name="targetId"/> that is
        /// visible to <paramref name="token"/>. Returns false when the id is out of range,
        /// never written, or no version is visible.
        /// </summary>
        protected bool TryReadItem(TransactionToken token, int targetId, out T item)
        {
            Contract.Requires(token != null);

            if (targetId >= this.storage.Length)
            {
                item = default(T);
                return false;
            }

            var buffer = this.storage[targetId];
            if (buffer == null)
            {
                item = default(T);
                return false;
            }

            Contract.Assume(buffer.Length > Sz.CLR.Int64);

            var offset = 0;
            var transactionId = buffer.ToUInt64(offset);
            if (!token.IsReadVisible(transactionId))
            {
                var size = this.RecordSize;
                Contract.Assume(size > 0);

                // Newest version failed visibility; walk older versions (each 'size' bytes,
                // newest first) until one is visible or the buffer is exhausted.
                offset = size + Sz.CLR.Int64;
                var visible = false;
                while (!(offset + Sz.CLR.Int64 > buffer.Length))
                {
                    transactionId = buffer.ToUInt64(offset);
                    if (token.IsReadVisible(transactionId))
                    {
                        visible = true;
                        break;
                    }

                    offset += size;
                }

                // TODO: End of the road, nothing readable.
                // Correctness here will rely heavily on proper DML handling...
                // Version clean ups will have to lag behind transaction completion.
                // By putting latest changes first, the first visible value will be used.
                if (!visible)
                {
                    item = default(T);
                    return false;
                }
            }

            item = this.ReadItem(buffer, offset);
            return true;
        }

        /// <summary>
        /// Standard dispose pattern; virtual so derived stores can release their own resources.
        /// </summary>
        protected virtual void Dispose(bool disposing)
        {
            // TODO: Disposal of a backing store is meaningful...
            // What is the persistency implication?
            if (disposing)
            {
                // Stop accepting new work so the consuming enumerable can terminate,
                // then cancel to wake a worker blocked waiting for input.
                this.commandQueue.CompleteAdding();
                this.workerCancellation.Cancel();
                this.counter.Dispose();
            }
        }

        /// <summary>
        /// Starts the single background worker that drains the command queue, returning the
        /// cancellation source that stops it.
        /// </summary>
        private CancellationTokenSource InitializeWorker()
        {
            var cancellation = new CancellationTokenSource();
            var token = cancellation.Token;
            Action pendDml = () =>
                {
                    // The token is passed to GetConsumingEnumerable so Cancel() actually
                    // interrupts a blocked worker; the token given to StartNew alone only
                    // prevents the task from starting, it never stops this loop.
                    foreach (var command in this.commandQueue.GetConsumingEnumerable(token))
                    {
                        Contract.Assume(command != null);

                        // TODO: Process DML command, set up transaction scope and isolation level...etc.
                        // TX will always be on an isolated thread from the caller. Isolation level must be in cell data (version)
                        // TODO: the command buffer may have to allow for byte marks to identify Tx boundaries.
                        switch (command.Operation)
                        {
                            case DmlOperation.None:
                                {
                                    this.PendLinkage(command as LinkingTask);
                                    break;
                                }

                            case DmlOperation.Add:
                                {
                                    this.PendAdd(command as DataTask<T>);
                                    break;
                                }

                            case DmlOperation.Delete:
                                {
                                    this.PendDelete(command as DataTask<T>);
                                    break;
                                }

                            case DmlOperation.Update:
                                {
                                    this.PendUpdate(command as DataTask<T>);
                                    break;
                                }
                        }
                    }
                };

            // LongRunning: this is a perpetual consumer loop; do not tie up a pool thread.
            Task.Factory.StartNew(pendDml, token, TaskCreationOptions.LongRunning, TaskScheduler.Default);
            return cancellation;
        }

        /// <summary>
        /// Applies a linkage change by prepending a new version whose linkage header points at
        /// the linked record. Fails the task when the slot is empty or a concurrency check fails.
        /// </summary>
        private void PendLinkage(LinkingTask task)
        {
            Contract.Requires(task != null);
            Contract.Requires(task.Token != null);

            var size = this.RecordSize;
            var targetId = task.TargetId;
            var token = task.Token;

            var buffer = this.storage[targetId];
            if (buffer == null)
            {
                task.State = TaskState.Failed;
                return;
            }

            Contract.Assume(buffer.Length > Sz.CLR.Int64);
            if (!buffer.CheckConcurrency(0, token, targetId))
            {
                // TODO: What are the concurrency implications to linking? Too many possible failures?
                // How can link integrity be maintained and always succeed regardless of pending commit order?
                // Will there be a hot spot when adding edges?
                // This method will need to look at serialization order and handle sequencing instead of failing.
                // Or, simply fail, and the calling code can elect to retry the linkage n times, and possibly somewhere else
                // in the linkage chain.
                task.State = TaskState.Failed;
                return;
            }

            Contract.Assume(size > Sz.CLR.Int64);
            Contract.Assume(buffer.Length > size);
            var newBuffer = buffer.ExpandAndShift(size);

            Contract.Assert(Sz.CLR.Int64 + size <= newBuffer.Length);
            Contract.Assume(Sz.CLR.Int64 + size <= buffer.Length);

            // Seed the new (front) version with a copy of the previous version's data.
            Array.Copy(buffer, Sz.CLR.Int64, newBuffer, Sz.CLR.Int64, size);

            // BUGFIX: stamp the NEW version's header — previously the transaction id was
            // embedded into the old buffer, which is discarded below (cf. PendDelete).
            token.TransactionId.EmbedOperation(DmlOperation.None).CopyBytes(newBuffer, 0);
            var newHeader = task.LinkPrevious ?
                   new Linkage(task.LinkedId, task.Item.NextId) :
                   new Linkage(task.Item.PreviousId, task.LinkedId);

            Contract.Assume(task.LinkOffset + Linkage.Size < newBuffer.Length);
            newHeader.Write(newBuffer, task.LinkOffset);

            // BUGFIX: publish the new buffer. Without this assignment the expanded buffer was
            // dropped on the floor and the linkage never reached storage (PendUpdate/PendDelete
            // both perform this store).
            this.storage[targetId] = newBuffer;
            task.State = TaskState.Pended;
        }

        /// <summary>
        /// Applies an update by prepending a new version containing the task's item.
        /// Fails the task when the slot is empty or a concurrency check fails.
        /// </summary>
        private void PendUpdate(DataTask<T> task)
        {
            Contract.Requires(task != null);
            Contract.Requires(task.Token != null);

            var targetId = task.TargetId;
            var buffer = this.storage[targetId];
            if (buffer == null)
            {
                task.State = TaskState.Failed;
                return;
            }

            var token = task.Token;
            Contract.Assume(buffer.Length > Sz.CLR.Int64);
            if (!buffer.CheckConcurrency(0, token, targetId))
            {
                task.State = TaskState.Failed;
                return;
            }

            var size = this.RecordSize;

            Contract.Assume(size > Sz.CLR.Int64);
            Contract.Assume(buffer.Length > size);
            var newBuffer = buffer.ExpandAndShift(size);
            this.WriteItem(task.Item, newBuffer, Sz.CLR.Int64);

            // BUGFIX: stamp newBuffer, not the old buffer — the old buffer is replaced on the
            // next line, so a stamp written into it was lost (cf. PendDelete, which is correct).
            token.TransactionId.EmbedOperation(DmlOperation.Update).CopyBytes(newBuffer, 0);
            this.storage[targetId] = newBuffer;
            task.State = TaskState.Pended;
        }

        /// <summary>
        /// Applies a delete by prepending a delete-marked version and recycling the id.
        /// Fails the task when the slot is empty or a concurrency check fails.
        /// </summary>
        private void PendDelete(DataTask<T> task)
        {
            Contract.Requires(task != null);
            Contract.Requires(task.Token != null);

            var targetId = task.TargetId;
            var buffer = this.storage[targetId];
            if (buffer == null)
            {
                task.State = TaskState.Failed;
                return;
            }

            var token = task.Token;
            Contract.Assume(buffer.Length > Sz.CLR.Int64);
            if (!buffer.CheckConcurrency(0, token, targetId))
            {
                task.State = TaskState.Failed;
                return;
            }

            var size = this.RecordSize;
            Contract.Assume(size > Sz.CLR.Int64);

            Contract.Assume(buffer.Length > size);
            var newBuffer = buffer.ExpandAndShift(size);
            token.TransactionId.EmbedOperation(DmlOperation.Delete).CopyBytes(newBuffer, 0);
            this.storage[targetId] = newBuffer;

            // Id becomes eligible for reuse by PendAdd.
            this.deleted.Enqueue(targetId);
            task.State = TaskState.Pended;
        }

        /// <summary>
        /// Applies an add: reuses a deleted id when available, otherwise draws a fresh one,
        /// writes a single-version buffer, and grows the backing array if needed.
        /// </summary>
        private void PendAdd(DataTask<T> task)
        {
            Contract.Requires(task != null);
            Contract.Requires(task.Token != null);

            int targetId;
            if (!this.deleted.TryDequeue(out targetId))
            {
                targetId = this.counter.NextId();
            }

            // Callers learn the assigned id through the task.
            task.TargetId = targetId;

            Contract.Assume(targetId > -1);

            var size = this.RecordSize;
            Contract.Assume(size > Sz.CLR.Int64);
            var buffer = new byte[size];

            this.WriteItem(task.Item, buffer, Sz.CLR.Int64);
            task.Token.TransactionId.EmbedOperation(DmlOperation.Add).CopyBytes(buffer, 0);

            if (targetId >= this.storage.Length)
            {
                // Grow with padding so a burst of adds does not expand repeatedly.
                this.storage.Expand(targetId + PaddingSize);
            }

            this.storage[targetId] = buffer;
            task.State = TaskState.Pended;
        }

        [ContractInvariantMethod]
        private void ObjectInvariant()
        {
            Contract.Invariant(this.storage != null);
            Contract.Invariant(this.counter != null);
            Contract.Invariant(this.deleted != null);
            Contract.Invariant(this.commandQueue != null);
            Contract.Invariant(this.workerCancellation != null);
        }
    }
}