﻿#region LicenseHeader

// Copyright 2012 The Trustees of Indiana University.  All rights reserved.
// 
// The Indiana University licenses this file to you under the Apache License, 
// Version 2.0 (the "License"); you may not use this file except in compliance 
// with the License.  You may obtain a copy of the License at 
// 
// http://www.apache.org/licenses/LICENSE-2.0
// 
// Unless required by applicable law or agreed to in writing, software 
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
//  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
// See the License for the specific language governing permissions and 
// limitations under the License.
// 
// @author Thilina Gunarathne (tgunarat@indiana.edu)

#endregion

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Threading;
using AzureMRCore.DataCommunication;
using AzureMRCore.DataModel;
using AzureMRCore.DataTypes;
using AzureMRCore.Drivers;
using AzureMRCore.IterativeDataCache;
using AzureMRCore.OutputFormat;
using Microsoft.WindowsAzure;
using Microsoft.WindowsAzure.ServiceRuntime;
using Microsoft.WindowsAzure.StorageClient;

namespace AzureMRCore.MapRed
{
    /// <summary>
    /// Executes the reduce phase of a MapReduce computation. Intermediate map
    /// output is obtained either from the in-memory <see cref="ReduceDataReceiver"/>
    /// (direct TCP transfer) or by polling Azure table/blob storage, decoded and
    /// grouped by key, sorted by the key's text value, and passed key-by-key to the
    /// user-overridable <see cref="Reduce"/> method. The collected output is then
    /// either handed straight to a merger or persisted to blob storage.
    /// </summary>
    /// <typeparam name="INKEY">Intermediate (map output) key type.</typeparam>
    /// <typeparam name="INVALUE">Intermediate (map output) value type.</typeparam>
    /// <typeparam name="OUTKEY">Reduce output key type.</typeparam>
    /// <typeparam name="OUTVALUE">Reduce output value type.</typeparam>
    public class Reducer<INKEY, INVALUE, OUTKEY, OUTVALUE> : IReducer
        where INKEY : Key, new()
        where OUTKEY : Key, new()
        where INVALUE : IValue, new()
        where OUTVALUE : IValue, new()
    {
        private CloudBlobClient _cloudBlobClient;
        private DataCache _dataCache;
        // Sleep interval (ms) between polls while waiting for intermediate data to arrive.
        private int _dataFetchSleepTime;
        // TODO:  private bool exponentialBackOff;
        private MRConf _mrConf;
        private MRDriver _mrDriver;
        // Sleep interval (ms) passed to the map-completion / fetch-completion polling helpers.
        private int _reduceQPollingSleepTime;
        private CloudStorageAccount _storageAccount;
        // Unique id of this reducer instance; written to table records as "FetchedBy"
        // so duplicate reducers do not re-fetch the same intermediate record.
        private Guid _workerId;

        #region IReducer Members

        /// <summary>
        /// Initializes the reducer with the storage account, job configuration,
        /// driver and iterative-data cache. Must be called before
        /// <see cref="ExecuteReduce"/>.
        /// </summary>
        /// <returns>Always <c>true</c>.</returns>
        public bool Init(CloudStorageAccount storageAccount, MRConf mrConf, MRDriver mrDriver, DataCache dataCache)
        {
            _mrConf = mrConf;
            _mrDriver = mrDriver;
            _dataCache = dataCache;
            _reduceQPollingSleepTime = mrConf.RedQPollingInterval;
            _dataFetchSleepTime = mrConf.RedQPollingInterval;
            //TODO exponentialBackOff = mrConf.exponentialBackoff;

            _storageAccount = storageAccount;
            _cloudBlobClient = _storageAccount.CreateCloudBlobClient();

            _workerId = Guid.NewGuid();
            return true;
        }

        /// <summary>
        /// Runs a single reduce task end-to-end: fetches and decodes intermediate
        /// data, sorts it by key text, invokes <see cref="Reduce"/> once per key,
        /// then merges or saves the collected output and marks the task finished.
        /// Per-phase wall-clock timings are recorded on the task record.
        /// </summary>
        public void ExecuteReduce(ReduceContext reduceTaskContext)
        {
            IDictionary<INKEY, List<INVALUE>> decodedData = GetIntermediateData(reduceTaskContext);
            Stopwatch sw = Stopwatch.StartNew();

            //invoke reduce task when everything is done
            var collector = new ReduceOutputCollector<OUTKEY, OUTVALUE>();
            // Materialize the ordering with ToList(): OrderBy is lazily evaluated,
            // so without it the sort would not run inside the timed section — and
            // the sorted sequence must actually be the one handed to Reduce below.
            List<KeyValuePair<INKEY, List<INVALUE>>> sortedResults =
                decodedData.OrderBy(kv => kv.Key.GetTextValue()).ToList();

            reduceTaskContext.ReduceTask.SortTime = sw.ElapsedMilliseconds;
            sw.Restart();
            foreach (var recordSet in sortedResults)
            {
                int status = Reduce(recordSet.Key, recordSet.Value, collector, reduceTaskContext.ProgramParams);
                //TODO act on status
            }

            reduceTaskContext.ReduceTask.ReduceExecutionTime = sw.ElapsedMilliseconds;
            sw.Restart();

            IMerger merger = _mrDriver.GetMerger();
            // do not spend time on saving output, if it can be directly passed to Merge step
            if (reduceTaskContext.ReduceTask.RunMerge && merger != null)
            {
                merger.Init(_storageAccount, _mrConf, _mrDriver, _dataCache);
                merger.RunMerge(reduceTaskContext, collector);
            }
            else
            {
                SaveReduceOutput(reduceTaskContext, collector);
            }
            //            Trace.WriteLine("Merge Done" + _workerId);
            reduceTaskContext.ReduceTask.MergeTime = sw.ElapsedMilliseconds;

            reduceTaskContext.UpdateReduceTaskStatus(MapDataModel.FINISHED);
        }

        #endregion

        /// <summary>
        /// Hook for subclasses to perform one-time configuration. The default
        /// implementation does nothing and reports success.
        /// </summary>
        protected virtual bool Config(CloudStorageAccount storageAccount, object configOut)
        {
            return true;
        }

        /// <summary>
        /// User-supplied reduce function; override to implement the computation.
        /// Invoked once per distinct key with all values grouped under that key.
        /// The default implementation is a no-op.
        /// </summary>
        /// <param name="key">The intermediate key.</param>
        /// <param name="values">All intermediate values emitted for <paramref name="key"/>.</param>
        /// <param name="outputCollector">Collector gathering the reduce output.</param>
        /// <param name="programArgs">Application-specific parameter string.</param>
        /// <returns>A status code; 0 indicates success (currently unexamined — see TODO at call site).</returns>
        public virtual int Reduce(INKEY key, List<INVALUE> values, IOutputCollector<OUTKEY, OUTVALUE> outputCollector, string programArgs)
        {
            return 0;
        }


        /// <summary>
        /// Obtains the decoded intermediate data for this reduce task: via the
        /// direct-TCP receiver when the driver enables it, otherwise by polling
        /// table/blob storage. Records the fetch time on the task.
        /// </summary>
        private IDictionary<INKEY, List<INVALUE>> GetIntermediateData(ReduceContext reduceTaskContext)
        {
            Stopwatch sw = Stopwatch.StartNew();
            ConcurrentDictionary<INKEY, List<INVALUE>> decodedData;
            if (_mrDriver.DirectTCPTranfer > 0)
            {
                string id = reduceTaskContext.JobID + "_" + reduceTaskContext.Iteration + "_" +
                            reduceTaskContext.AppName;
                ReduceDataReceiver reduceDataReceiver = ReduceDataReceiver.GetInstance();
                IOutputFormat<INKEY, INVALUE> inFormat = _mrDriver.GetMapOutputFormat<INKEY, INVALUE>();
                var keyComparer = new KeyComparer();
                decodedData = new ConcurrentDictionary<INKEY, List<INVALUE>>(keyComparer);

                //Wait till maps are done.. TODO: do decoding in here
                // (IsAllMapDone is handed the polling interval — presumably it sleeps
                //  internally between checks; TODO confirm.)
                while (
                    !reduceTaskContext.IsAllMapDone(reduceTaskContext.NumMaps, reduceTaskContext.Iteration,
                                                    reduceTaskContext.AppName, _reduceQPollingSleepTime))
                {
                }
                // Poll with a sleep instead of busy-spinning at 100% CPU while the
                // receiver is still assembling this task's data.
                while (!reduceDataReceiver.ContainsReduceData(id))
                {
                    Thread.Sleep(_dataFetchSleepTime);
                }
                ConcurrentDictionary<int, DataItem> reduceData = reduceDataReceiver.GetReduceData(id);
                foreach (DataItem dataItem in reduceData.Values)
                {
                    inFormat.FromBytes(dataItem.Data, decodedData);
                }
            }
            else
            {
                LocalResource localResource = RoleEnvironment.GetLocalResource("temp");
                IOutputFormat<INKEY, INVALUE> inFormat = _mrDriver.GetMapOutputFormat<INKEY, INVALUE>();
                // Fetch & Decode data
                decodedData = FetchIntermediateData(localResource, inFormat, reduceTaskContext);
            }
            reduceTaskContext.ReduceTask.FetchTime = sw.ElapsedMilliseconds;
            return decodedData;
        }

        /// <summary>
        /// Uploads the collected reduce output to the task's output blob container
        /// and records the first output path/key on the task record.
        /// </summary>
        private void SaveReduceOutput(ReduceContext reduceTaskContext, ReduceOutputCollector<OUTKEY, OUTVALUE> collector)
        {
            CloudBlobContainer container =
                _cloudBlobClient.GetContainerReference(reduceTaskContext.OutDir);
            container.CreateIfNotExist();

            IOutputFormat<OUTKEY, OUTVALUE> outFormat = _mrDriver.GetReduceOutputFormat<OUTKEY, OUTVALUE>();

            Dictionary<string, OUTKEY> outFiles = outFormat.UploadValues(collector.GetResults(),
                                                                         reduceTaskContext.ReduceID,
                                                                         reduceTaskContext.Iteration, container);
            reduceTaskContext.ReduceTask.OutputPath = outFiles.Keys.FirstOrDefault();

            OUTKEY key = outFiles.Values.FirstOrDefault();
            if (key != null)
            {
                reduceTaskContext.ReduceTask.Key = key.GetTextValue();
            }

            //TODO remove this when the reduce-parent-merge data integration is done
            reduceTaskContext.ReduceDataContext.UpdateObject(reduceTaskContext.ReduceTask);
            reduceTaskContext.ReduceDataContext.SaveChangesWithRetries();
        }

        /// <summary>
        /// Polls the reduce-input table for this iteration's records and decodes
        /// each one, either inline (value stored in the table) or by asynchronously
        /// downloading the backing blob. Loops until all map output has been
        /// fetched and no blob downloads are outstanding; the whole fetch is
        /// retried up to <c>Constants.NUM_RETRIES</c> times on failure.
        /// </summary>
        private ConcurrentDictionary<INKEY, List<INVALUE>> FetchIntermediateData(LocalResource localResource,
                                                                                 IOutputFormat<INKEY, INVALUE> inFormat,
                                                                                 ReduceContext reduceContext)
        {
            int retry = 0;
            var keyComparer = new KeyComparer();
            var decodedData = new ConcurrentDictionary<INKEY, List<INVALUE>>(keyComparer);

            // RowKeys already decoded (value = source blob URI, or "" for table data).
            var fetchedRecords = new ConcurrentDictionary<string, string>();
            // RowKeys with a blob download in flight; entries are removed by the
            // download callback, so an empty list means no outstanding downloads.
            var fetchingRecords = new List<string>();

            reduceTaskDataFetchLabel:
            //Rather than failing, do some retries here 
            try
            {
                while (true)
                {
                    bool fetchedNow = false;

                    ReduceInputDataContext reduceInputDataContext = reduceContext.ReduceInputDataContext;
                    //right aligns the iteration number so that we can query the table based on partitions
                    string iteration = reduceContext.Iteration.PadLeft(5, '0');
                    string nextIter = (Int32.Parse(reduceContext.Iteration) + 1).ToString().PadLeft(5, '0');
                    //Querying the table based on partitions
                    // NOTE: the non-short-circuit '&' is kept deliberately — this
                    // expression is translated by the table service query provider.
                    IQueryable<ReduceInputDataModel> reduceTaskRecords =
                        reduceInputDataContext.ReduceTaskRecords.Where(
                            p =>
                            (((p.PartitionKey.CompareTo(iteration)) >= 0) && ((p.PartitionKey.CompareTo(nextIter)) < 0)) &
                            (p.FetchedBy != _workerId.ToString())).AsTableServiceQuery();

                    foreach (ReduceInputDataModel record in reduceTaskRecords)
                    {
                        if (record.Value != null)
                        {
                            // Non-short-circuit '|' so the fetch always executes.
                            fetchedNow = fetchedNow |
                                         FetchTableData(reduceInputDataContext, record, decodedData, inFormat,
                                                        fetchedRecords);
                        }
                        else
                        {
                            //Fetch only if it's not fetched yet. A duplicate reducer can fetch after this reducer, 
                            // requiring us to check this in addition to fetchedby
                            if (!(fetchedRecords.ContainsKey(record.RowKey)))
                            {
                                fetchedNow = fetchedNow |
                                             FetchBlobData(reduceInputDataContext, record, decodedData, fetchedRecords,
                                                           inFormat, fetchingRecords, localResource);
                            }
                        }
                    }
                    //save here or save above for each record.. have to test
                    lock (reduceInputDataContext)
                    {
                        reduceInputDataContext.SaveChangesWithRetries();
                    }

                    if (fetchingRecords.Count == 0)
                    {
                        // use short citcuit & to avoid going in to IsDoneFetching
                        //IsDoneFetching performs sleeping too
                        if ((!fetchedNow) && (reduceContext.IsDoneFetching(reduceContext.NumMaps,
                                                                           reduceContext.Iteration,
                                                                           reduceContext.AppName,
                                                                           _reduceQPollingSleepTime,
                                                                           fetchedRecords.Count)))
                        {
                            lock (reduceInputDataContext)
                            {
                                reduceInputDataContext.SaveChangesWithRetries();
                            }
//                            Trace.WriteLine("Leaving fetch..." + RoleEnvironment.CurrentRoleInstance.Id);
                            break;
                        }
                    }
                    else if (!fetchedNow)
                    {
                        // Downloads are outstanding but nothing new was started;
                        // back off briefly before re-polling the table.
                        Thread.Sleep(_reduceQPollingSleepTime/10);
                    }
                }
            }
            catch (Exception e)
            {
                Trace.WriteLine(e.Message + "    " + e.StackTrace);
                if (retry < Constants.NUM_RETRIES)
                {
                    retry++;
                    Thread.Sleep(100);
                    goto reduceTaskDataFetchLabel;
                }
                throw new Exception("reduceTaskDataFetchLabel failed. retry " + retry, e);
            }
            return decodedData;
        }

        /// <summary>
        /// Decodes an intermediate record whose data is stored inline in the table
        /// entity, marks it as fetched by this worker and records it in
        /// <paramref name="fetchedRecords"/>. Always reports that data was fetched.
        /// </summary>
        private bool FetchTableData(ReduceInputDataContext reduceInputDataContext, ReduceInputDataModel record,
                                    ConcurrentDictionary<INKEY, List<INVALUE>> decodedData,
                                    IOutputFormat<INKEY, INVALUE> inFormat,
                                    ConcurrentDictionary<string, string> fetchedRecords)
        {
            record.FetchStart = string.Format("{0:G}", DateTime.Now);

            if (record.ValueCount == 1)
            {
                // Single key/value pair: decode directly without the output format.
                var rowKey = new INKEY();
                rowKey.Parse(record.Key);

                var rowValue = new INVALUE();
                rowValue.FromBytes(record.Value);

                // NOTE(review): List<T> is not thread-safe; this assumes table
                // records and blob-download callbacks never append to the same
                // key's list concurrently — TODO confirm.
                List<INVALUE> values = decodedData.GetOrAdd(rowKey, new List<INVALUE>());
                values.Add(rowValue);
            }
            else
            {
                inFormat.FromBytes(record.Value, decodedData);
            }
            //Trace.WriteLine("fetched " + record.PartitionKey + "  " + record.MapID);
            //record.FetchTime = sw.ElapsedMilliseconds;
            record.FetchedBy = _workerId.ToString();

            if (!fetchedRecords.TryAdd(record.RowKey, ""))
            {
                ReportDuplicateKey(record);
            }
            reduceInputDataContext.UpdateObject(record);
            return true;
        }

        /// <summary>
        /// Starts an asynchronous download of the blob backing an intermediate
        /// record; decoding completes in <see cref="DownloadBlobToStreamCallback"/>.
        /// </summary>
        /// <returns><c>true</c> if a download was started; <c>false</c> if this
        /// record is already being fetched.</returns>
        private bool FetchBlobData(ReduceInputDataContext reduceInputDataContext, ReduceInputDataModel record,
                                   ConcurrentDictionary<INKEY, List<INVALUE>> decodedData,
                                   ConcurrentDictionary<string, string> fetchedRecords,
                                   IOutputFormat<INKEY, INVALUE> inFormat, List<string> fetchingRecords,
                                   LocalResource localResource)
        {
            Stopwatch sw = Stopwatch.StartNew();
            string jobID = record.JobID;
            string key = record.Key;
            string blobURI = record.URI;

            //TODO is it fair to assume old file is same (correct) as the new one
            //if (!fetchedRecords.ContainsValue(blobURI))
            //{
            lock (fetchingRecords)
            {
                if (!(fetchingRecords.Contains(record.RowKey)))
                {
                    fetchingRecords.Add(record.RowKey);
                }
                else
                {
                    // Download already in flight for this record.
                    return false;
                }
            }
            record.FetchStart = string.Format("{0:G}", DateTime.Now);
            reduceInputDataContext.UpdateObject(record);

            string downloadDir = localResource.RootPath + "\\" + jobID + "reduceinput";

            // move data to here
            CloudBlob fileBlob = _cloudBlobClient.GetBlobReference(blobURI);
            DirectoryInfo dir = Directory.CreateDirectory(downloadDir);
            string[] parts = blobURI.Split('/');
            string fileName = parts[parts.Length - 1];
            string localInputFileName = dir.FullName + "\\" + fileName;

            // 20-second per-request timeout with four linear retries 10s apart.
            var options = new BlobRequestOptions
                              {
                                  Timeout = new TimeSpan(0, 0, 0, 20),
                                  RetryPolicy = RetryPolicies.Retry(4, new TimeSpan(0, 0, 0, 10))
                              };
            var fs = new FileStream(localInputFileName, FileMode.Create);
            try
            {
                fileBlob.BeginDownloadToStream(fs, options, DownloadBlobToStreamCallback,
                                               new Object[]
                                                   {
                                                       fileBlob, fs, inFormat, decodedData,
                                                       localInputFileName, key, record,
                                                       reduceInputDataContext, blobURI
                                                       , sw, fetchedRecords, fetchingRecords
                                                   });
            }
            catch
            {
                // If the download never starts the callback will not run; clean up
                // here so the stream is not leaked and the record does not stay in
                // fetchingRecords forever (which would stall FetchIntermediateData).
                fs.Dispose();
                lock (fetchingRecords)
                {
                    fetchingRecords.Remove(record.RowKey);
                }
                throw;
            }
            return true;
        }

        /// <summary>
        /// Completion callback for the asynchronous blob download started by
        /// <see cref="FetchBlobData"/>: finishes the transfer, decodes the local
        /// file into the shared dictionary, marks the record fetched and always
        /// removes it from the in-flight list. Errors are logged to the
        /// AzureMRErrors table rather than propagated.
        /// </summary>
        public void DownloadBlobToStreamCallback(IAsyncResult result)
        {
            // Unpack the state array passed to the callback (see FetchBlobData).
            var state = (Object[]) result.AsyncState;
            var stream = (FileStream) state[1];
            var blobURI = (string) state[8];
            var record = (ReduceInputDataModel) state[6];
            var fetchedRecords = (ConcurrentDictionary<string, string>) state[10];
            var fetchingRecords = (List<string>) state[11];
            try
            {
                result.AsyncWaitHandle.WaitOne();
                var blob = (CloudBlob) state[0];
                var inFormat = (IOutputFormat<INKEY, INVALUE>) state[2];
                var decodedData = (ConcurrentDictionary<INKEY, List<INVALUE>>) state[3];
                var localInputFileName = (string) state[4];
                var key = (string) state[5];
                var reduceInputContext = (ReduceInputDataContext) state[7];
                var sw = (Stopwatch) state[9];

                //End the operation.
                blob.EndDownloadToStream(result);
                //Close the stream.
                stream.Close();

                inFormat.DecodeData(localInputFileName, key, decodedData);
                record.FetchedBy = _workerId.ToString();
                record.FetchTime = sw.ElapsedMilliseconds;
                lock (reduceInputContext)
                {
                    reduceInputContext.UpdateObject(record);
                }

                //update the fetched record list
                if (!fetchedRecords.TryAdd(record.RowKey, blobURI))
                {
                    ReportDuplicateKey(record);
                }
                //Trace.WriteLine("add the fetched records." + blobURI);
            }
            catch (Exception e)
            {
                Trace.TraceError(e.ToString());
                var errorContext = new ErrorDataContext(_storageAccount.TableEndpoint.ToString(),
                                                        _storageAccount.Credentials, "AzureMRErrors");
                _storageAccount.CreateCloudTableClient().CreateTableIfNotExist(errorContext.Name);
                var error = new ErrorDataModel(e, "maptask", e.Message);
                errorContext.AddObject(errorContext.Name, error);
                errorContext.SaveChangesWithRetries();
            }
            finally
            {
                // Always release the in-flight marker so FetchIntermediateData can
                // eventually observe fetchingRecords.Count == 0 and exit.
                lock (fetchingRecords)
                {
                    fetchingRecords.Remove(record.RowKey);
                }
            }
        }

        /// <summary>
        /// Logs a "duplicate key" condition (the same intermediate record was
        /// fetched twice) to the AzureMRErrors table.
        /// </summary>
        private void ReportDuplicateKey(ReduceInputDataModel record)
        {
            Trace.TraceError("Adding duplicate key");
            var errorContext = new ErrorDataContext(_storageAccount.TableEndpoint.ToString(),
                                                    _storageAccount.Credentials, "AzureMRErrors");
            _storageAccount.CreateCloudTableClient().CreateTableIfNotExist(errorContext.Name);
            var error =
                new ErrorDataModel("Adding Duplicate Key. Reduce Fetch. Reducer ID:" +
                                   record.ReduceID);
            errorContext.AddObject(errorContext.Name, error);
            errorContext.SaveChangesWithRetries();
        }
    }
}