﻿#region LicenseHeader

// Copyright 2012 The Trustees of Indiana University.  All rights reserved.
// 
// The Indiana University licenses this file to you under the Apache License, 
// Version 2.0 (the "License"); you may not use this file except in compliance 
// with the License.  You may obtain a copy of the License at 
// 
// http://www.apache.org/licenses/LICENSE-2.0
// 
// Unless required by applicable law or agreed to in writing, software 
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
//  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
// See the License for the specific language governing permissions and 
// limitations under the License.
// 
// @author Thilina Gunarathne (tgunarat@indiana.edu)

#endregion

using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Threading;
using System.Threading.Tasks;
using AzureMRCore.Client;
using AzureMRCore.DataModel;
using AzureMRCore.DataTypes;
using AzureMRCore.Drivers;
using AzureMRCore.MapRed;
using AzureMRCore.OutputCollectors;
using AzureMRCore.OutputFormat;
using Microsoft.WindowsAzure;
using Microsoft.WindowsAzure.StorageClient;

namespace AzureMRCore.DataCommunication
{
    /// <summary>
    /// Singleton responsible for moving intermediate map-task output to reduce
    /// tasks. Three transport channels are used, chosen per record:
    /// direct TCP transfer to the reduce worker, an Azure table entity (for
    /// payloads small enough to fit in a table property), or an Azure blob
    /// (for larger payloads). Also supports broadcast ("AllGather") transfers
    /// and scheduling of iterative reduce tasks on the job board table.
    /// </summary>
    public class DataCommunicator
    {
        // Lazily created singleton; creation is serialized by the
        // [MethodImpl(Synchronized)] factory method below.
        private static DataCommunicator _dataCommunicator;
        private CloudBlobClient _blobClient;
        private MRConf _mrConf;
        private CloudStorageAccount _storageAccount;
        // Cached table endpoint URI, used when constructing table data contexts.
        private string _tableBaseAddress;

        private DataCommunicator(MRConf mrConf, CloudStorageAccount storageAccount)
        {
            _mrConf = mrConf;
            _storageAccount = storageAccount;
            _blobClient = storageAccount.CreateCloudBlobClient();
            _tableBaseAddress = storageAccount.TableEndpoint.ToString();
        }

        /// <summary>
        /// Returns the process-wide instance, creating it on first use.
        /// NOTE(review): arguments passed on any call after the first are
        /// silently ignored — callers must always supply the same
        /// configuration and storage account.
        /// </summary>
        [MethodImpl(MethodImplOptions.Synchronized)]
        public static DataCommunicator GetInstance(MRConf mrConf, CloudStorageAccount storageAccount)
        {
            return _dataCommunicator ?? (_dataCommunicator = new DataCommunicator(mrConf, storageAccount));
        }

        /// <summary>
        /// Sends one partition of map output (all key/value pairs destined for
        /// reduce task <c>record.Key</c>) to that reduce task, picking the
        /// transport by flag and payload size: TCP when
        /// <paramref name="directTCPTransfer"/> &gt; 0, a table entity when the
        /// serialized payload is under ~63KB, otherwise blobs.
        /// Marks <paramref name="reduceProductCounts"/>[record.Key] with the
        /// number of products emitted for that reduce task.
        /// </summary>
        /// <param name="mapTaskContext">Context of the map task producing the output.</param>
        /// <param name="outFormat">Serializer for the output key/value pairs.</param>
        /// <param name="directTCPTransfer">Non-zero to prefer direct TCP transfer.</param>
        /// <param name="reduceProductCounts">Per-reduce-task product counters, indexed by reduce ID; updated in place.</param>
        /// <param name="record">Reduce-task ID paired with the list of key/value pairs for it.</param>
        public void TransmitMultipleValues<TOUTKEY, TOUTVALUE>(MapTaskContext mapTaskContext, IOutputFormat<TOUTKEY, TOUTVALUE> outFormat,
                                                                int directTCPTransfer, byte[] reduceProductCounts,
                                                                KeyValuePair<int, List<KeyValuePair<TOUTKEY, TOUTVALUE>>> record) where TOUTKEY : Key, new()
            where TOUTVALUE : IValue, new()
        {
            if (directTCPTransfer > 0)
            {
                // to avoid recreating.. TODO : may be move to a hashmap in a global data structure
                if (mapTaskContext.ReduceMetaDataContext == null)
                {
                    mapTaskContext.ReduceMetaDataContext = new ReduceMetaDataContext(_tableBaseAddress,
                                                                                     _storageAccount.Credentials,
                                                                                     DataModelUtils.
                                                                                         GetReduceTableName(
                                                                                             mapTaskContext.JobID));
                }

                byte[] value = outFormat.ToBytes(record.Value);
                int reduceID = record.Key;
                // Fire-and-forget background transfer.
                // NOTE(review): the Task's result/exception is never observed;
                // on .NET 4-era TPL an unobserved exception can escalate on
                // finalization, and a failed TCP transfer is silently lost here.
                var task = new Task(() => TransmitMultipleValuesUsingTCP(value, mapTaskContext, reduceID,
                                                                         mapTaskContext.ReduceMetaDataContext));
                task.Start();
                reduceProductCounts[record.Key] = 1;
            }
                // Use table to send multiple values if the size is less than 64kb
                // (63*1024 leaves headroom under the table property size limit —
                // presumably the Azure 64KB binary-property cap; TODO confirm).
            else if (outFormat.GetSize(record.Value) < 63*1024)
            {
                byte[] value = outFormat.ToBytes(record.Value);
                TransmitMultipleValuesInTable(value, mapTaskContext, record.Key,
                                              record.Value.Count);
                reduceProductCounts[record.Key] = 1;
            }
            else
            {
                // Payload too large for a table entity: upload via blobs, which
                // also updates reduceProductCounts for this reduce task.
                TransmitDataUsingBlobs(mapTaskContext, outFormat, record, reduceProductCounts);
            }
        }


        /// <summary>
        /// Writes a single already-serialized value for one reduce task into the
        /// intermediate data table and returns a fresh per-reduce-task product
        /// count array with only <paramref name="reduceID"/> set to 1.
        /// </summary>
        /// <param name="mapTaskContext">Context of the producing map task.</param>
        /// <param name="data">Serialized value bytes stored in the table entity.</param>
        /// <param name="reduceID">Target reduce task index.</param>
        /// <param name="key">Textual key stored alongside the value.</param>
        /// <returns>A byte[NumReduceTasks] array with element [reduceID] == 1.</returns>
        public byte[] TransmitSingleValue(MapTaskContext mapTaskContext,
                                          byte[] data, int reduceID, string key)
        {
            var reduceProductCounts = new byte[mapTaskContext.NumReduceTasks];

            // create reduce task or broadcast data table
            string jobID = mapTaskContext.JobID;

            ReduceInputDataContext reduceTaskDataContext =
                mapTaskContext.CreateIntermediateDataTable(reduceID, false);

            var reduceTaskRecord = new ReduceInputDataModel(jobID, reduceID, null,
                                                            mapTaskContext.Iteration, mapTaskContext.MapID)
                                       {
                                           MapID = mapTaskContext.MapID,
                                           // NOTE(review): local wall-clock time; consider UTC
                                           // if PutTime is ever compared across roles.
                                           PutTime = String.Format("{0:G}", DateTime.Now),
                                           Value = data,
                                           Key = key,
                                           ValueCount = 1
                                       };
            reduceTaskDataContext.AddObject(reduceTaskDataContext.TableName, reduceTaskRecord);

            reduceProductCounts[reduceID] = 1;
            reduceTaskDataContext.SaveChangesWithRetries();
            return reduceProductCounts;
        }

        /// <summary>
        /// Pushes serialized map output to a reduce worker over TCP. Polls the
        /// reduce metadata table (goto-based retry loop, up to NUM_RETRIES*2
        /// attempts, 100 ms apart) until the target reduce task has registered a
        /// worker-role endpoint, then transfers via DataTransferClient.
        /// </summary>
        /// <returns>true if the transfer was handed to DataTransferClient;
        /// false if no worker endpoint appeared before retries were exhausted
        /// (the data is then silently dropped by the caller's fire-and-forget Task).</returns>
        private bool TransmitMultipleValuesUsingTCP(byte[] data, MapTaskContext mapTaskContext,
                                                    int reduceID, ReduceMetaDataContext reduceDataContext)
        {
            string jobID = mapTaskContext.JobID;

            // MapID is "<index>_<...>"; the leading segment is the numeric map index.
            string[] ids = mapTaskContext.MapID.Split('_');
            int mapIndex = Int32.Parse(ids[0]);
            var item = new DataItem
                           {
                               JobID = jobID,
                               Data = data,
                               //InputFormatType = inputFormatType,
                               Iteration = Int32.Parse(mapTaskContext.Iteration),
                               MapID = mapTaskContext.MapID,
                               MapIndex = mapIndex,
                               NumMapTasks = mapTaskContext.MapTask.NumMapTasks,
                               AppName = mapTaskContext.AppName
                           };

            int retry = 0;
            // Re-queries the table on each pass; the reduce worker may not have
            // registered its endpoint (WorkerRole) yet.
            reduceTableWaitLabel:
            IQueryable<ReduceDataModel> reduceTaskEnum =
                reduceDataContext.ReduceTasks.Where(p =>
                                                    (p.RowKey == reduceID + "_" + mapTaskContext.AppName) &
                                                    (p.PartitionKey == mapTaskContext.Iteration));
            // NOTE(review): enumerator is never disposed; harmless for this
            // query type but IEnumerator<T> is IDisposable.
            IEnumerator<ReduceDataModel> reduceTaskEnumerator = reduceTaskEnum.GetEnumerator();
            if (reduceTaskEnumerator.MoveNext())
            {
                ReduceDataModel reduceTask = reduceTaskEnumerator.Current;
                if (reduceTask.WorkerRole != null)
                {
                    DataTransferClient.TransferIntermediateData(item, reduceTask.WorkerRole, _mrConf.Factory);
                    return true;
                }
            }
            if (retry < (Constants.NUM_RETRIES*2))
            {
                retry++;
                Thread.Sleep(100);
                goto reduceTableWaitLabel;
            }

            return false;
        }

        /// <summary>
        /// Stores a serialized multi-value payload for one reduce task as a
        /// single entity in the intermediate data table (payload must fit in a
        /// table property — see the size guard in TransmitMultipleValues).
        /// </summary>
        private void TransmitMultipleValuesInTable(byte[] data, MapTaskContext mapTaskContext,
                                                   int reduceID, int valueCount)
        {
            ReduceInputDataContext reduceTaskDataContext = mapTaskContext.CreateIntermediateDataTable(reduceID, false);


            var reduceTaskRecord = new ReduceInputDataModel(mapTaskContext.JobID, reduceID, null,
                                                            mapTaskContext.Iteration, mapTaskContext.MapID)
                                       {
                                           MapID = mapTaskContext.MapID,
                                           PutTime = String.Format("{0:G}", DateTime.Now),
                                           Value = data,
                                           ValueCount = valueCount
                                       };
            reduceTaskDataContext.AddObject(reduceTaskDataContext.TableName, reduceTaskRecord);
            reduceTaskDataContext.SaveChangesWithRetries();
        }

        /// <summary>
        /// Uploads a large map-output partition to blob storage (via the output
        /// format's UploadValues) and records one intermediate-table entity per
        /// uploaded blob so the reduce task can locate them.
        /// NOTE(review): the product count is cast to byte, so more than 255
        /// intermediate files per reduce task would silently truncate.
        /// </summary>
        private void TransmitDataUsingBlobs<OUTKEY, OUTVALUE>(MapTaskContext mapTaskContext,
                                                              IOutputFormat<OUTKEY, OUTVALUE> outFormat,
                                                              KeyValuePair<int, List<KeyValuePair<OUTKEY, OUTVALUE>>>
                                                                  record, byte[] reduceProductCounts)
            where OUTKEY : Key, new()
            where OUTVALUE : IValue, new()
        {
            // use outdir directly to store map results, if the job does not have a reduce stage
            string containerName = DataModelUtils.GetIntermediateDataContainer(mapTaskContext.OutDir);
            // Blob container names must be lower case.
            containerName = containerName.ToLower();
            CloudBlobContainer container = _blobClient.GetContainerReference(containerName);
            container.CreateIfNotExist();
            string firstID = mapTaskContext.MapTask.RowKey + "_" + record.Key;
            // For the momenet disable uploading many files for a reduce task (file per key). this affects the FileOutputFormat.
            Dictionary<string, OUTKEY> intermediateFileRefs = outFormat.UploadValues(record.Value, firstID,
                                                                                     mapTaskContext.MapTask.
                                                                                         PartitionKey,
                                                                                     container);

            ReduceInputDataContext reduceTaskDataContext = mapTaskContext.CreateIntermediateDataTable(record.Key,
                                                                                                      false);

            reduceProductCounts[record.Key] = (byte) intermediateFileRefs.Count;
            foreach (var intermediateFile in intermediateFileRefs)
            {
                //TODO: Make sure the entity gets replaced in case of rerun
                // intermediateFile.Key is the blob reference recorded in the entity;
                // Value (when present) supplies the textual record key.
                var reduceTaskRecord = new ReduceInputDataModel(mapTaskContext.JobID, record.Key,
                                                                intermediateFile.Key,
                                                                mapTaskContext.Iteration,
                                                                mapTaskContext.MapID)
                                           {
                                               MapID = mapTaskContext.MapID,
                                               PutTime = String.Format("{0:G}", DateTime.Now),
                                               ValueCount = 1
                                           };
                if (intermediateFile.Value != null)
                {
                    reduceTaskRecord.Key = intermediateFile.Value.GetTextValue();
                }
                reduceTaskDataContext.AddObject(reduceTaskDataContext.TableName, reduceTaskRecord);
            }
            // Single batched save after all entities are queued.
            reduceTaskDataContext.SaveChangesWithRetries();
        }

        /// <summary>
        /// Executes an AllGather collective: broadcasts the first collected map
        /// output value to all nodes, then (from map task "0" only) schedules
        /// the next iteration on the job board.
        /// NOTE(review): assumes the collector holds at least one result entry
        /// with at least one value — First() throws otherwise.
        /// </summary>
        public void ExecuteAllGatherTransfer<OUTKEY, OUTVALUE, BCASTINKEY, BCASTINVALUE>(MapTaskContext mapTaskContext,
                                                                                         MRDriver mrDriver,
                                                                                         PrimitiveConf primitiveConf,
                                                                                         MapOutputCollector
                                                                                             <OUTKEY, OUTVALUE>
                                                                                             mapOutCollector)
            where OUTKEY : Key, new() where OUTVALUE : IValue, new() where BCASTINKEY : Key, new()
            where BCASTINVALUE : IValue, new()
        {
            string inputFormatType = mrDriver.GetBCastInputFormat<BCASTINKEY, BCASTINVALUE>
                ().GetType().AssemblyQualifiedName;
            KeyValuePair<int, List<KeyValuePair<OUTKEY, OUTVALUE>>> reduceEntry =
                mapOutCollector.GetResults().First();
            KeyValuePair<OUTKEY, OUTVALUE> valuePair = reduceEntry.Value.First();
            byte[] data = valuePair.Value.GetBytes();

            BCastToAll(data, inputFormatType, primitiveConf.Iteration, mapTaskContext, Types.AllGather);

            //TODO fix this... Temporarily doing this only for the first map task. But there is not guarantee that it'll be the first to finish)
            if (mapTaskContext.MapID.StartsWith("0"))
            {
                AddIteration(mapTaskContext, primitiveConf, mapTaskContext.JobID);
            }
        }

        /// <summary>
        /// Broadcasts a data item to all nodes asynchronously via
        /// DataTransferClient.NotifyAllNodes.
        /// NOTE(review): fire-and-forget Task — failures are unobserved.
        /// </summary>
        private void BCastToAll(byte[] data, string inputFormatType, int iteration,
                                MapTaskContext mapTaskContext, Types bCastType)
        {
            // create reduce task or broadcast data table
            string jobID = mapTaskContext.JobID;

            // Leading segment of MapID is the numeric map index.
            string[] ids = mapTaskContext.MapID.Split('_');
            int mapIndex = Int32.Parse(ids[0]);
            var item = new DataItem
                           {
                               JobID = jobID,
                               Data = data,
                               InputFormatType = inputFormatType,
                               Iteration = iteration,
                               MapID = mapTaskContext.MapID,
                               MapIndex = mapIndex,
                               NumMapTasks = mapTaskContext.MapTask.NumMapTasks,
                               TransferType = bCastType
                           };
            var task = new Task(() => DataTransferClient.NotifyAllNodes(item, _mrConf.Factory));
            task.Start();
        }

        /// <summary>
        /// Registers the next primitive iteration on the job board table and
        /// enqueues its iterative reduce tasks.
        /// NOTE(review): jobModel from FirstOrDefault is dereferenced without a
        /// null check — a missing job table entry throws NullReferenceException
        /// (contrast with the retry/throw handling in AddIterationGeneric).
        /// </summary>
        private void AddIteration(MapTaskContext mapTaskContext, PrimitiveConf primitiveConf, string jobID)
        {
            string tableAddress = _storageAccount.TableEndpoint.ToString();
            var jobBoardDataContext = new JobBoardDataContext(tableAddress, _storageAccount.Credentials,
                                                              DataModelUtils.GetJobBoardTableName());

            var jobContext = new JobDataContext(tableAddress, _storageAccount.Credentials,
                                                DataModelUtils.GetJobTableName());
            var reduceContext = new ReduceMetaDataContext(_storageAccount.TableEndpoint.ToString(),
                                                          _storageAccount.Credentials,
                                                          DataModelUtils.GetReduceTableName(
                                                              mapTaskContext.JobID));

            IQueryable<JobDataModel> jobQueryable =
                jobContext.Jobs.Where(job => job.RowKey == mapTaskContext.JobID);
            JobDataModel jobModel = jobQueryable.FirstOrDefault();
            //TODO add iteration too.. get info from primitive
            var jobIteration = new JobBoardDataModel
                                   {
                                       RowKey = primitiveConf.Iteration.ToString(),
                                       PartitionKey = jobID + "_" + primitiveConf.AppName,
                                       AppName = primitiveConf.AppName,
                                       JobID = jobID,
                                       MapTaskTable = DataModelUtils.GetMapTableName(jobID),
                                       ProgramParams = jobModel.ProgramParams,
                                       NumReduceTasks = primitiveConf.NumReduceTasks,
                                       BaseAppName = mapTaskContext.AppName,
                                       BaseIteration = mapTaskContext.Iteration,
                                       OutDir = jobModel.OutDir,
                                       InDir = jobModel.InDir,
                                       BCastDataURI = jobID + "_" + primitiveConf.Iteration,
                                       IsPrimitiveBCast = true
                                   };

            jobBoardDataContext.AddObject(jobBoardDataContext.Name, jobIteration);
            jobBoardDataContext.SaveChangesWithRetries();

            var client = new TwisterAzureClient(_storageAccount, jobID, _mrConf.ReduceSchedQueueName);

            //TODO make sure we add reduce tasks only once
            client.AddIterativeReduceTasks(jobModel, primitiveConf.AppName, primitiveConf.Iteration,
                                           jobID + "_" + primitiveConf.Iteration, primitiveConf.NumReduceTasks,
                                           reduceContext, primitiveConf.IsMerge);
        }

        /// <summary>
        /// Static variant of AddIteration usable without the singleton: looks up
        /// the job entry (retrying up to 50 times with a growing sleep, since the
        /// entry may not be visible yet), registers the new iteration on the job
        /// board, and enqueues its iterative reduce tasks.
        /// </summary>
        /// <param name="numReduceTasks">Desired reduce task count; a negative
        /// value means "use the job's configured NumReduceTasks".</param>
        /// <returns>The resolved job model.</returns>
        /// <exception cref="Exception">Thrown when the job table entry never appears.</exception>
        public static JobDataModel AddIterationGeneric(string appName, int newIteration, string bCastURI,
                                                       ReduceContext reduceContext,
                                                       bool isTableBroadcast, int numReduceTasks,
                                                       JobDataContext jobContext,
                                                       JobBoardDataContext jobBoardDataContext, string reduceQueueName,
                                                       CloudStorageAccount storageAccount)
        {
            int retryCount = 0;
            jobFetchRetry:
            IQueryable<JobDataModel> jobQueryable = jobContext.Jobs.Where(job => job.RowKey == reduceContext.JobID);
            JobDataModel jobModel = jobQueryable.FirstOrDefault();
            if (jobModel == null)
            {
                if (retryCount < 50)
                {
                    retryCount++;
                    // Linear back-off: 5 ms, 10 ms, ... 250 ms.
                    Thread.Sleep(retryCount*5);
                    goto jobFetchRetry;
                }
                throw new Exception("No table entry for the Job");
            }

            string jobID = reduceContext.JobID;

            if (numReduceTasks < 0)
            {
                numReduceTasks = jobModel.NumReduceTasks;
            }

            var jobIteration = new JobBoardDataModel
                                   {
                                       RowKey = newIteration.ToString(),
                                       PartitionKey = jobID + "_" + appName,
                                       AppName = appName,
                                       JobID = jobID,
                                       MapTaskTable = DataModelUtils.GetMapTableName(jobID),
                                       ProgramParams = jobModel.ProgramParams,
                                       NumReduceTasks = numReduceTasks,
                                       BaseAppName = reduceContext.AppName,
                                       BaseIteration = reduceContext.Iteration,
                                       OutDir = jobModel.OutDir,
                                       InDir = jobModel.InDir,
                                       BCastDataURI = bCastURI,
                                       IsPrimitiveBCast = isTableBroadcast
                                   };

            jobBoardDataContext.AddObject(jobBoardDataContext.Name, jobIteration);
            jobBoardDataContext.SaveChangesWithRetries();

            var client = new TwisterAzureClient(storageAccount, jobID, reduceQueueName);

            client.AddIterativeReduceTasks(jobModel, appName, newIteration, bCastURI, numReduceTasks,
                                           reduceContext.ReduceDataContext, jobModel.PerformMerge);
            return jobModel;
        }
    }
}