using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Misakai.Kafka;

namespace Reflexive.IO.Kafka
{
	/// <summary>
	/// Consumes a single Kafka topic by running one long-lived polling task per
	/// (whitelisted) partition and exposing the fetched messages through a bounded
	/// blocking queue. Partition membership is refreshed on a timer, and the next
	/// offset to fetch is tracked per partition so polling resumes where it left off.
	/// </summary>
	internal sealed class KafkaConsumer : IDisposable
	{
		private readonly ConsumerOptions _options;
		// Bounded hand-off between the per-partition polling tasks and Consume().
		private readonly BlockingCollection<Message> _fetchResponseQueue;
		private readonly CancellationTokenSource _disposeToken = new CancellationTokenSource();
		// partitionId -> the long-running polling task currently servicing that partition.
		private readonly ConcurrentDictionary<int, Task> _partitionPollingIndex = new ConcurrentDictionary<int, Task>();
		// partitionId -> next offset to request from that partition.
		private readonly ConcurrentDictionary<int, long> _partitionOffsetIndex = new ConcurrentDictionary<int, long>();
		private readonly IScheduledTimer _topicPartitionQueryTimer;
		private readonly MetadataQueries _metadataQueries;
		// readonly: only ever assigned in the constructor.
		private readonly int _maxFetchBufferSize;
		private int _disposeCount;
		// Re-entrancy guard so only one timer tick refreshes partitions at a time.
		private int _ensureOneThread;
		private Topic _topic;

		/// <summary>Number of partition polling tasks currently registered.</summary>
		public int ConsumerTaskCount
		{
			get
			{
				return this._partitionPollingIndex.Count;
			}
		}

		/// <summary>
		/// Creates a consumer for the topic named in <paramref name="options"/>.
		/// </summary>
		/// <param name="options">Router, topic name, buffer sizes and timing options.</param>
		/// <param name="maxFetchBufferSize">Maximum bytes requested per fetch; defaults to effectively unlimited.</param>
		/// <param name="positions">Optional starting offsets, one per partition.</param>
		public KafkaConsumer(ConsumerOptions options, int maxFetchBufferSize = Int32.MaxValue, params OffsetPosition[] positions)
		{
			this._options = options;
			this._maxFetchBufferSize = maxFetchBufferSize;
			this._fetchResponseQueue = new BlockingCollection<Message>(this._options.ConsumerBufferSize);
			this._metadataQueries = new MetadataQueries(this._options.Router);
			this._topicPartitionQueryTimer = new ScheduledTimer()
				.Do(this.RefreshTopicPartitions)
				.Every(TimeSpan.FromMilliseconds((double)this._options.TopicPartitionQueryTimeMs))
				.StartingAt(DateTime.Now);
			this.SetOffsetPosition(positions);
		}

		/// <summary>
		/// Starts partition discovery (if not already started) and returns a blocking
		/// enumerable over all fetched messages. Enumeration blocks until messages
		/// arrive or the supplied token is cancelled.
		/// </summary>
		/// <param name="cancellationToken">Optional token to stop enumeration; defaults to none.</param>
		public IEnumerable<Message> Consume(CancellationToken? cancellationToken = null)
		{
			this._options.Log.DebugFormat("Consumer: Beginning consumption of topic: {0}", new object[]
			{
				this._options.Topic
			});
			this._topicPartitionQueryTimer.Begin();
			return this._fetchResponseQueue.GetConsumingEnumerable(cancellationToken ?? CancellationToken.None);
		}

		/// <summary>
		/// Sets the next offset to fetch for each given partition, overwriting any
		/// previously tracked offset.
		/// </summary>
		public void SetOffsetPosition(params OffsetPosition[] positions)
		{
			foreach (OffsetPosition position in positions)
			{
				// Indexer set is the idiomatic equivalent of AddOrUpdate when both
				// the add and update lambdas return the same constant value.
				this._partitionOffsetIndex[position.PartitionId] = position.Offset;
			}
		}

		/// <summary>Returns a snapshot of the per-partition offsets currently tracked.</summary>
		public List<OffsetPosition> GetOffsetPosition()
		{
			return this._partitionOffsetIndex
				.Select(x => new OffsetPosition
				{
					PartitionId = x.Key,
					Offset = x.Value
				})
				.ToList();
		}

		/// <summary>
		/// Timer callback: re-queries topic metadata and ensures a polling task exists
		/// for every partition that passes the whitelist. Exceptions are logged and
		/// swallowed so the timer keeps firing.
		/// </summary>
		private void RefreshTopicPartitions()
		{
			try
			{
				// Skip this tick entirely if a previous refresh is still in flight.
				if (Interlocked.Increment(ref this._ensureOneThread) == 1)
				{
					this._options.Log.DebugFormat("Consumer: Refreshing partitions for topic: {0}", new object[]
					{
						this._options.Topic
					});
					List<Topic> topicMetadata = this._options.Router.GetTopicMetadata(new string[]
					{
						this._options.Topic
					});
					if (topicMetadata.Count <= 0)
					{
						// InvalidOperationException instead of the discouraged
						// ApplicationException; caught by the handler below either way.
						throw new InvalidOperationException(string.Format("Unable to get metadata for topic:{0}.", this._options.Topic));
					}
					this._topic = topicMetadata.First();
					foreach (Partition partition in this._topic.Partitions)
					{
						int partitionId = partition.PartitionId;
						// An empty whitelist means "all partitions".
						if (this._options.PartitionWhitelist.Count == 0 || this._options.PartitionWhitelist.Any(x => x == partitionId))
						{
							// Start a polling task only if one is not already registered.
							this._partitionPollingIndex.AddOrUpdate(
								partitionId,
								i => this.ConsumeTopicPartitionAsync(this._topic.Name, partitionId),
								(i, existingTask) => existingTask);
						}
					}
				}
			}
			catch (Exception ex)
			{
				this._options.Log.ErrorFormat("Exception occured trying to setup consumer for topic:{0}.  Exception={1}", new object[]
				{
					this._options.Topic,
					ex
				});
			}
			finally
			{
				Interlocked.Decrement(ref this._ensureOneThread);
			}
		}

		/// <summary>
		/// Starts the polling loop for one partition: fetch from the tracked offset,
		/// enqueue every message, advance the offset, and back off when the partition
		/// is empty. Runs until the consumer is disposed; per-iteration exceptions are
		/// logged and polling continues.
		/// </summary>
		private Task ConsumeTopicPartitionAsync(string topic, int partitionId)
		{
			// LongRunning: this is a blocking loop that lives for the consumer's
			// lifetime, so give it a dedicated thread instead of pinning a pool thread.
			return Task.Factory.StartNew(delegate
			{
				try
				{
					this._options.Log.DebugFormat("Consumer: Creating polling task for topic: {0} on parition: {1}", new object[]
					{
						topic,
						partitionId
					});
					while (!this._disposeToken.IsCancellationRequested)
					{
						try
						{
							// Read the tracked offset, registering 0 for a new partition.
							long offset = this._partitionOffsetIndex.GetOrAdd(partitionId, 0L);
							FetchRequest request = new FetchRequest
							{
								Fetches = new List<Fetch>
								{
									new Fetch
									{
										Topic = topic,
										PartitionId = partitionId,
										Offset = offset,
										MaxBytes = this._maxFetchBufferSize
									}
								}
							};
							BrokerRoute brokerRoute = this._options.Router.SelectBrokerRoute(topic, partitionId);
							// Deliberate sync-over-async: this delegate runs on its own
							// dedicated thread, so blocking here is acceptable.
							List<FetchResponse> result = brokerRoute.Connection.SendAsync<FetchResponse>(request).Result;
							FetchResponse fetchResponse = result.FirstOrDefault();
							if (fetchResponse != null && fetchResponse.Messages.Count > 0)
							{
								foreach (Message message in fetchResponse.Messages)
								{
									// Blocks when the consumer buffer is full; unblocked by dispose.
									this._fetchResponseQueue.Add(message, this._disposeToken.Token);
									if (this._disposeToken.IsCancellationRequested)
									{
										return;
									}
								}
								// Resume after the highest offset just delivered.
								this._partitionOffsetIndex[partitionId] = fetchResponse.Messages.Max(x => x.Offset) + 1L;
								continue;
							}
							// Nothing fetched: back off before polling again.
							Thread.Sleep(this._options.BackoffInterval);
						}
						catch (Exception ex)
						{
							this._options.Log.ErrorFormat("Exception occured while polling topic:{0} partition:{1}.  Polling will continue.  Exception={2}", new object[]
							{
								topic,
								partitionId,
								ex
							});
						}
					}
				}
				finally
				{
					this._options.Log.DebugFormat("Consumer: Disabling polling task for topic: {0} on parition: {1}", new object[]
					{
						topic,
						partitionId
					});
					// Deregister so a future refresh can restart polling for this partition.
					Task task;
					this._partitionPollingIndex.TryRemove(partitionId, out task);
				}
			}, this._disposeToken.Token, TaskCreationOptions.LongRunning, TaskScheduler.Default);
		}

		/// <summary>Fetches metadata for the named topic via the metadata query helper.</summary>
		public Topic GetTopic(string topic)
		{
			return this._metadataQueries.GetTopic(topic);
		}

		/// <summary>
		/// Blocking wrapper around <see cref="GetTopicOffsetAsync"/>.
		/// </summary>
		public List<OffsetResponse> GetTopicOffsetSync(string topic)
		{
			// .Result both waits and unwraps; the previous explicit Wait() was redundant.
			return this.GetTopicOffsetAsync(topic).Result;
		}

		/// <summary>Queries the broker for offset positions of the named topic.</summary>
		/// <param name="topic">Topic to query.</param>
		/// <param name="maxOffsets">Maximum number of offsets to return per partition.</param>
		/// <param name="time">Offset timestamp selector; -1 means "latest".</param>
		public Task<List<OffsetResponse>> GetTopicOffsetAsync(string topic, int maxOffsets = 2, int time = -1)
		{
			return this._metadataQueries.GetTopicOffsetAsync(topic, maxOffsets, time);
		}

		/// <summary>
		/// Cancels all polling tasks, waits briefly for them to drain, then disposes
		/// owned resources. Safe to call multiple times; only the first call acts.
		/// </summary>
		public void Dispose()
		{
			if (Interlocked.Increment(ref this._disposeCount) != 1)
			{
				return;
			}
			this._options.Log.DebugFormat("Consumer: Disposing...", new object[0]);
			this._disposeToken.Cancel();
			this._topicPartitionQueryTimer.End();
			// Give each polling task a bounded window to observe cancellation.
			foreach (Task task in this._partitionPollingIndex.Values.Where(t => t != null))
			{
				task.Wait(TimeSpan.FromSeconds(5.0));
			}
			// Same order as the original nested using blocks unwound.
			this._disposeToken.Dispose();
			this._metadataQueries.Dispose();
			this._topicPartitionQueryTimer.Dispose();
		}
	}
}
