﻿#region LicenseHeader

// Copyright 2012 The Trustees of Indiana University.  All rights reserved.
// 
// The Indiana University licenses this file to you under the Apache License, 
// Version 2.0 (the "License"); you may not use this file except in compliance 
// with the License.  You may obtain a copy of the License at 
// 
// http://www.apache.org/licenses/LICENSE-2.0
// 
// Unless required by applicable law or agreed to in writing, software 
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
//  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
// See the License for the specific language governing permissions and 
// limitations under the License.
// 
// @author Thilina Gunarathne (tgunarat@indiana.edu)

#endregion

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using AzureMRCore.DataCommunication;
using AzureMRCore.DataModel;
using AzureMRCore.DataTypes;
using AzureMRCore.Drivers;
using AzureMRCore.InputFormat;
using AzureMRCore.IterativeDataCache;
using AzureMRCore.OutputFormat;
using AzureMRCore.Primitives;
using Microsoft.WindowsAzure;
using Microsoft.WindowsAzure.ServiceRuntime;
using Microsoft.WindowsAzure.StorageClient;

namespace AzureMRCore.MapRed
{
    //TODO optimize single reduce case (eg: use the output blob of the reducer as the broadcast)
    /// <summary>
    ///   Performs the merge step of the iterative MapReduce computation. In Merge(), users can make the
    ///   decision to finish the computation or to continue with a new iteration. Merge will receive all
    ///   the outputs of the reduce tasks.
    /// </summary>
    /// <typeparam name="TInkey"> Key type of the reduce outputs consumed by the merge. </typeparam>
    /// <typeparam name="TInvalue"> Value type of the reduce outputs consumed by the merge. </typeparam>
    /// <typeparam name="TOutKey"> Key type of the merge output. </typeparam>
    /// <typeparam name="TOutValue"> Value type of the merge output. </typeparam>
    /// <typeparam name="TBCastInKey"> Key type of the broadcast data for the iteration. </typeparam>
    /// <typeparam name="TBCastInValue"> Value type of the broadcast data for the iteration. </typeparam>
    public class Merger<TInkey, TInvalue, TOutKey, TOutValue, TBCastInKey, TBCastInValue> : IMerger
        where TInkey : Key, new()
        where TOutKey : Key, new()
        where TBCastInKey : Key, new()
        where TInvalue : IValue, new()
        where TOutValue : IValue, new()
        where TBCastInValue : IValue, new()
    {
        private CloudBlobClient _cloudBlobClient;
        private CloudTableClient _cloudTableClient;
        private DataCache _dataCache;
        // Base polling interval (ms) used when waiting for reduce outputs to appear; taken from MRConf.
        private int _dataFetchSleepTime;
        // Set to true by SaveMergeOutput so RunMerge does not upload the merge output twice.
        private bool _isMergeOut;
        private JobBoardDataContext _jobBoardDataContext;
        private MRConf _mrConf;
        private MRDriver _mrDriver;
        private CloudStorageAccount _storageAccount;
        private string _tableBaseAddress;
        // Unique id for this merger instance; recorded as FetchedBy on consumed reduce records.
        private Guid _workerId;

        #region IMerger Members

        /// <summary>
        ///   Initializes the merger: captures the configuration, creates blob/table clients and
        ///   ensures the job-board table exists. Must be called before <see cref="RunMerge" />.
        /// </summary>
        /// <param name="storageAccount"> Azure storage account used for blobs and tables. </param>
        /// <param name="mrConf"> MapReduce configuration (polling intervals, queue names, broadcast settings). </param>
        /// <param name="mrDriver"> Driver supplying the input/output format implementations. </param>
        /// <param name="dataCache"> Cache used to fetch (and reuse) broadcast data across iterations. </param>
        /// <returns> Always true. </returns>
        public virtual Boolean Init(CloudStorageAccount storageAccount, MRConf mrConf, MRDriver mrDriver,
                                    DataCache dataCache)
        {
            _mrConf = mrConf;
            _mrDriver = mrDriver;
            _dataCache = dataCache;
            _dataFetchSleepTime = mrConf.RedQPollingInterval;

            _storageAccount = storageAccount;
            _tableBaseAddress = _storageAccount.TableEndpoint.ToString();
            _cloudBlobClient = _storageAccount.CreateCloudBlobClient();
            _cloudTableClient = _storageAccount.CreateCloudTableClient();

            _jobBoardDataContext = new JobBoardDataContext(_tableBaseAddress, _storageAccount.Credentials,
                                                           DataModelUtils.GetJobBoardTableName());
            _cloudTableClient.CreateTableIfNotExist(_jobBoardDataContext.Name);

            _workerId = Guid.NewGuid();
            return true;
        }

        /// <summary>
        ///   Runs the merge task: fetches the broadcast data (if any), gathers and decodes all reduce
        ///   outputs, invokes the user <see cref="Merge" />, optionally schedules the next iteration,
        ///   persists the merge output, and marks the reduce task as finished.
        /// </summary>
        /// <param name="reduceContext"> Context of the reduce task this merger is attached to. </param>
        /// <param name="reduceCollectorObject"> Output collector of the local parent reduce task; must not be null. </param>
        public void RunMerge(ReduceContext reduceContext, object reduceCollectorObject)
        {
            //Trace.WriteLine("Start Merge. Reduce ID:" + reduceContext.ReduceID + " i:" + reduceContext.Iteration);
            int iteration = Int32.Parse(reduceContext.Iteration);
            List<KeyValuePair<TBCastInKey, TBCastInValue>> bCastData = null;

            //TODO : merge with Mapper.getBCastData and support table broadcasting
            ReduceDataModel reduceTask = reduceContext.ReduceTask;
            if (reduceTask.BroadcastData != null)
            {
                // Prefer broadcast data already delivered via the TCP broadcast receiver;
                // fall back to fetching it through the data cache.
                if (_mrConf.BCastReceiver.ContainsBCastData(reduceTask.BroadcastData))
                {
                    var bCastInputFormat =
                        (IInputFormat<TBCastInKey, TBCastInValue>)
                        _mrConf.BCastReceiver.GetBCastData(reduceTask.BroadcastData);
                    bCastData = bCastInputFormat.GetAllPairs();
                    reduceContext.IsTableBCast = true;
                }
                else
                {
                    // TODO separate config for BCastdata input format
                    IInputFormat<TBCastInKey, TBCastInValue> bCastInputFormat =
                        _dataCache.GetData(reduceTask.BroadcastData,
                                           _mrDriver.GetBCastInputFormat<TBCastInKey, TBCastInValue>(),
                                           reduceContext.JobID, reduceContext.Iteration, reduceTask.AppName,
                                           _workerId.ToString(), null, true);
                    bCastData = bCastInputFormat.GetAllPairs();
                }
            }
//            Trace.WriteLine("Merge Bcast fetch Done" + _workerId);
            ConcurrentDictionary<TInkey, List<TInvalue>> decodedData = FetchDecodeReduceOutputs(reduceContext,
                                                                                                reduceCollectorObject);

            //invoke merge task when everything is done
            var collector = new ReduceOutputCollector<TOutKey, TOutValue>();

            bool addIteration;
            // NOTE(review): the returned status code is currently unused — confirm whether callers need it surfaced.
            int status = Merge(decodedData, reduceContext.ProgramParams, bCastData, collector, reduceContext,
                               out addIteration);

//            Trace.WriteLine("Done Merge. addIteration:" + addIteration + " Reduce ID:" + reduceContext.ReduceID + " i:" +
//                            reduceContext.Iteration);

            if (addIteration)
            {
                AddIteration(reduceContext.AppName, iteration + 1, collector, reduceContext);
//                Trace.WriteLine("Added Iteration: Reduce ID:" + reduceContext.ReduceID + " i:" + reduceContext.Iteration);
            }

            // Persist the merge output unless it was already saved while scheduling the next iteration.
            if (!_isMergeOut)
            {
                SaveMergeOutput(reduceContext, collector);
            }

            TryRemoveCurrentIteration(reduceContext);
            reduceContext.UpdateReduceTaskStatus(MapDataModel.FINISHED);
        }

        #endregion

        #region Add/Remove Iterations

        /// <summary>
        ///   Adds a new iteration using the merge output as the broadcast data for that iteration.
        ///   The merge output is distributed to the workers via the TCP tree broadcast.
        /// </summary>
        /// <param name="appName"> Application (job) name. </param>
        /// <param name="newIteration"> Iteration number of the iteration being added. </param>
        /// <param name="collector"> Collector holding the merge output to broadcast. </param>
        /// <param name="reduceContext"> Context of the current reduce task. </param>
        /// <param name="numReduceTasks"> Number of reduce tasks for the new iteration; -1 keeps the current value. </param>
        protected void AddIteration(string appName, int newIteration,
                                    IOutputCollector<TOutKey, TOutValue> collector,
                                    ReduceContext reduceContext, int numReduceTasks = -1)
        {
            //string mergeOut = SaveMergeOutput(reduceContext, (ReduceOutputCollector<TOutKey, TOutValue>) collector);

            string mergeOut = TCPBcastMerge(reduceContext, (ReduceOutputCollector<TOutKey, TOutValue>) collector,
                                            newIteration);
            //TableBroadcast is false as we are uploading data here
            AddIteration(appName, newIteration, mergeOut, reduceContext, true, numReduceTasks);
        }

        /// <summary>
        ///   Registers a new iteration of the application in the job table (via DataCommunicator) and
        ///   advances the job's current-iteration counter.
        /// </summary>
        /// <param name="appName"> Application (job) name. </param>
        /// <param name="newIteration"> Iteration number of the iteration being added. </param>
        /// <param name="bCastDataID"> Identifier of the broadcast data for the new iteration. </param>
        /// <param name="reduceContext"> Context of the current reduce task. </param>
        /// <param name="isTableBroadcast"> Whether the broadcast data is served through the broadcast service. </param>
        /// <param name="numReduceTasks"> Number of reduce tasks for the new iteration; -1 keeps the current value. </param>
        protected void AddIteration(string appName, int newIteration, string bCastDataID, ReduceContext reduceContext,
                                    bool isTableBroadcast,
                                    int numReduceTasks = -1)
        {
            var jobContext = new JobDataContext(_tableBaseAddress, _storageAccount.Credentials,
                                                DataModelUtils.GetJobTableName());
            JobDataModel jobModel = DataCommunicator.AddIterationGeneric(appName, newIteration, bCastDataID,
                                                                         reduceContext,
                                                                         isTableBroadcast, numReduceTasks, jobContext,
                                                                         _jobBoardDataContext,
                                                                         _mrConf.ReduceSchedQueueName,
                                                                         _storageAccount);

            jobModel.CurrentIteration = newIteration;
            jobContext.UpdateObject(jobModel);
            jobContext.SaveChanges();
        }

        /// <summary>
        ///   Serializes the merge output and broadcasts it to all role instances over TCP using a tree
        ///   broadcast (started on a background task), also registering it with the local receiver so
        ///   this instance can consume it without a network round-trip.
        /// </summary>
        /// <param name="reduceTaskContext"> Context of the current reduce task. </param>
        /// <param name="collector"> Collector holding the merge output to broadcast. </param>
        /// <param name="iteration"> Iteration number the broadcast data belongs to. </param>
        /// <returns> The broadcast data identifier ("jobID_iteration"). </returns>
        protected string TCPBcastMerge(ReduceContext reduceTaskContext,
                                       ReduceOutputCollector<TOutKey, TOutValue> collector, int iteration)
        {
            // Receivers need the assembly-qualified type name to reconstruct the input format.
            string inputFormatType = _mrDriver.GetBCastInputFormat<TBCastInKey, TBCastInValue>
                ().GetType().AssemblyQualifiedName;

            IOutputFormat<TOutKey, TOutValue> outFormat =
                _mrDriver.GetMergeOutputFormat<TOutKey, TOutValue>();
            byte[] data = outFormat.ToBytes(collector.GetResults());

            var item = new TreeBCastItem
                           {
                               JobID = reduceTaskContext.JobID,
                               Data = data,
                               InputFormatType = inputFormatType,
                               Iteration = iteration,
                               TransferType = Types.SimpleBCast
                           };

            int nodeCount = RoleEnvironment.CurrentRoleInstance.Role.Instances.Count();
            // Fire-and-forget: the broadcast runs concurrently while this merger finishes up.
            // NOTE(review): failures inside this task are unobserved — consider logging its faults.
            var task =
                new Task(
                    () =>
                    DataTransferClient.TreeBroadcast(item, 0, (nodeCount - 1), _mrConf.Factory, _mrConf.ParallelBCastMax));
            task.Start();
            BCastServiceReceiver bCastServiceReceiver = BCastServiceReceiver.GetInstance();
            bCastServiceReceiver.AddSimpleBCastData(item);
            return reduceTaskContext.JobID + "_" + iteration;
        }

        /// <summary>
        ///   Uploads the merge output to the job's output blob container and marks the output as saved
        ///   so <see cref="RunMerge" /> does not save it a second time.
        /// </summary>
        /// <param name="reduceTaskContext"> Context of the current reduce task (provides the output container name). </param>
        /// <param name="collector"> Collector holding the merge output. </param>
        /// <returns> The first blob reference key of the uploaded output, or null if nothing was uploaded. </returns>
        protected string SaveMergeOutput(ReduceContext reduceTaskContext,
                                         ReduceOutputCollector<TOutKey, TOutValue> collector)
        {
            _isMergeOut = true;
            CloudBlobContainer container =
                _cloudBlobClient.GetContainerReference(reduceTaskContext.OutDir);
            container.CreateIfNotExist();

            IOutputFormat<TOutKey, TOutValue> outFormat =
                _mrDriver.GetMergeOutputFormat<TOutKey, TOutValue>();

            Dictionary<string, TOutKey> outRefs = outFormat.UploadValues(collector.GetResults(), "MergedOutput",
                                                                         reduceTaskContext.Iteration,
                                                                         container);
            return outRefs.Keys.FirstOrDefault();
        }

        /// <summary>
        ///   Best-effort removal of the finished iteration's entry from the job board table.
        ///   Failures are swallowed deliberately: a stale job-board entry must not crash the merge.
        /// </summary>
        /// <param name="reduceContext"> Context identifying the job, application and iteration to remove. </param>
        private void TryRemoveCurrentIteration(ReduceContext reduceContext)
        {
            try
            {
                var finishedIteration = new JobBoardDataModel
                                            {
                                                RowKey = reduceContext.Iteration,
                                                PartitionKey =
                                                    reduceContext.JobID + "_" + reduceContext.AppName
                                            };

                // Attach with ETag "*" to delete without first fetching the entity.
                _jobBoardDataContext.AttachTo(_jobBoardDataContext.Name, finishedIteration, "*");
                _jobBoardDataContext.DeleteObject(finishedIteration);
                _jobBoardDataContext.SaveChanges();
//                Trace.WriteLine("Removed JobBoard Entry Reduce ID:" + reduceContext.ReduceID + " i:" +
//                                reduceContext.Iteration);
            }
            catch (Exception)
            {
                //No need to crash due to failing to remove the bulleting entry.
//                Trace.WriteLine("Failed to Remove JobBoard Entry Reduce ID:" + reduceContext.ReduceID + " i:" +
//                                reduceContext.Iteration);
            }
        }

        #endregion

        #region fetch/decode reduce outputs

        /// <summary>
        ///   Gathers the outputs of all reduce tasks for this iteration: downloads and decodes the
        ///   remote reducers' outputs (when there is more than one reducer) and merges in the local
        ///   parent reducer's output from its in-memory collector.
        /// </summary>
        /// <param name="reduceContext"> Context of the current reduce task. </param>
        /// <param name="reduceCollectorObject"> The local reduce task's output collector; must not be null. </param>
        /// <returns> All reduce outputs keyed by reduce output key. </returns>
        /// <exception cref="Exception"> Thrown when <paramref name="reduceCollectorObject" /> is null. </exception>
        private ConcurrentDictionary<TInkey, List<TInvalue>> FetchDecodeReduceOutputs(ReduceContext reduceContext,
                                                                                      object reduceCollectorObject)
        {
            LocalResource localResource = RoleEnvironment.GetLocalResource("temp");
            var decodedData = new ConcurrentDictionary<TInkey, List<TInvalue>>(); //(keyComparer);

            //optimizing for single reduce task
            int numReduceTasks = reduceContext.ReduceTask.NumReduceTasks;
            if (numReduceTasks > 1)
            {
                Dictionary<string, string> intermediateData = FetchReduceOutputs(localResource,
                                                                                 reduceContext,
                                                                                 numReduceTasks);
                //var keyComparer = new KeyComparer();
                // Not using a KeyComparer as we Assume Reduce outputs to have unique keys
                IOutputFormat<TInkey, TInvalue> inFormat = _mrDriver.GetReduceOutputFormat<TInkey, TInvalue>();
                //TODO : optimize for the output from the local parent reduce task
                foreach (var dataProduct in intermediateData)
                {
                    inFormat.DecodeData(dataProduct.Key, dataProduct.Value, decodedData);
                }
            }
            //            Trace.WriteLine("Merge Fetch Done" + _workerId);

            ReduceOutputCollector<TInkey, TInvalue> reduceCollector;
            if (reduceCollectorObject != null)
            {
                reduceCollector = (ReduceOutputCollector<TInkey, TInvalue>) reduceCollectorObject;
            }
            else
            {
                throw new Exception("Reduce Collector Object Cannot be Null.");
            }
            // get the output from the local parent reduce task.
            foreach (var kvPair in reduceCollector.GetResults())
            {
                List<TInvalue> values = decodedData.GetOrAdd(kvPair.Key, new List<TInvalue>());
                // Lock the list: GetOrAdd may hand the same list to concurrent callers.
                lock (values)
                {
                    values.Add(kvPair.Value);
                }
            }
            //            Trace.WriteLine("Merge Decode Done" + _workerId);
            return decodedData;
        }

        /// <summary>
        ///   Polls the reduce task table until the outputs of all *other* reduce tasks of this
        ///   iteration are finished and downloaded locally. Expects numReduceTasks - 1 records since
        ///   the local parent reducer's output arrives via its in-memory collector instead.
        /// </summary>
        /// <param name="localResource"> Local scratch storage the blobs are downloaded into. </param>
        /// <param name="reduceContext"> Context of the current reduce task. </param>
        /// <param name="numReduceTasks"> Total number of reduce tasks in this iteration. </param>
        /// <returns> Map from local downloaded file name to the reduce output key. </returns>
        /// <exception cref="Exception">
        ///   Thrown when more outputs than reducers are fetched, or when a fetch keeps failing after
        ///   <c>Constants.NUM_RETRIES</c> retries.
        /// </exception>
        private Dictionary<string, string> FetchReduceOutputs(LocalResource localResource, ReduceContext reduceContext,
                                                              int numReduceTasks)
        {
            //list of records fetched so far.
            var fetchedRecords = new List<string>();
            // fetched data
            var reduceOutputs = new Dictionary<string, string>();

            while (true)
            {
                int retry = 0;

                //Rather than failing, do some retries here 
                mergeDataFetchLabel:
                try
                {
                    IQueryable<ReduceDataModel> reduceTaskRecords =
                        reduceContext.ReduceDataContext.ReduceTasks;

                    // Non-short-circuiting '&' is kept on purpose: this predicate is translated to a
                    // table-service query, not evaluated locally.
                    IEnumerable<ReduceDataModel> recordsToFetch = reduceTaskRecords.Where(
                        p => ((p.PartitionKey == reduceContext.Iteration) & (p.TaskStatus == MapDataModel.FINISHED) &
                              p.AppName == reduceContext.AppName));
                    //Criteria to decide that we have fetched all the data

                    foreach (ReduceDataModel record in recordsToFetch)
                    {
                        //Fetch only if it's not fetched yet.
                        if (!(fetchedRecords.Contains(record.RowKey)))
                        {
                            string jobID = record.PartitionKey;
                            string taskID = record.RowKey;
                            string key = record.Key;
                            string blobURI = record.OutputPath;

                            string downloadDir = localResource.RootPath + "\\" + jobID + "mergeinput";
                            string localInputFileName = MapRedUtils.DownloadBLOB(downloadDir, _cloudBlobClient,
                                                                                 jobID, blobURI);

                            reduceOutputs.Add(localInputFileName, key);

                            //update the task table
                            record.FetchedBy = _workerId.ToString();
                            reduceContext.ReduceDataContext.UpdateObject(record);
                            //update the fetched record list
                            fetchedRecords.Add(record.RowKey);
                        }
                    }
                    // Save on the same context the updates were registered with; saving a different
                    // context would silently drop the FetchedBy updates.
                    reduceContext.ReduceDataContext.SaveChangesWithRetries();

                    // numReduceTasks - 1: the local parent reducer's output is consumed in memory
                    // and never fetched from the table.
                    if ((numReduceTasks - 1) == fetchedRecords.Count)
                    {
                        break;
                    }
                    else if (numReduceTasks < fetchedRecords.Count)
                    {
                        throw new Exception("Fetched more reduce outputs than reducers!!");
                    }
                }
                catch (Exception e)
                {
                    // The goto re-enters after 'int retry = 0;', so the counter survives retries.
                    if (retry < Constants.NUM_RETRIES)
                    {
                        retry++;
                        Thread.Sleep(100);
                        goto mergeDataFetchLabel;
                    }
                    throw new Exception("mergeTaskDataFetchLabel failed. retry " + retry, e);
                }

                Thread.Sleep(_dataFetchSleepTime/10);
            }
            return reduceOutputs;
        }

        #endregion

        /// <summary>
        ///   Perform the merge step of the iterative computation. In here, users can make the decision to finish the computation or to continue with a new iteration. This method will receive all the outputs of the reduce tasks.
        /// </summary>
        /// <param name="values"> All the output Key Values from Reducers. </param>
        /// <param name="programArgs"> Program arguments passed through the reduce context. </param>
        /// <param name="dynamicData"> Broadcast data for the current iteration </param>
        /// <param name="outputCollector"> Collects the output key-value pairs of the Merge Task </param>
        /// <param name="reduceContext"> Context of the current reduce task. </param>
        /// <param name="addIteration"> Output parameter. Setting this to true will add another iteration of this same application with the merge output data as the BroadCast data for the new iteration. </param>
        /// <returns> Status code of the merge; the default implementation returns 0 and does not add an iteration. </returns>
        public virtual int Merge(IDictionary<TInkey, List<TInvalue>> values, string programArgs,
                                 List<KeyValuePair<TBCastInKey, TBCastInValue>> dynamicData,
                                 IOutputCollector<TOutKey, TOutValue> outputCollector, ReduceContext reduceContext,
                                 out bool addIteration)
        {
            addIteration = false;
            return 0;
        }
    }
}