﻿////////////////////////////////////////////////////////////////
///
/// File: Models.cs
/// Author: Cristian Dittamo
/// Last update: 25 March 2010
/// Description: This file contains the definitions of the
///              main classes needed for mapping different models of
///              parallel computation onto the virtual machine one.
/// To do: 
/// ------------------------------------------------------------
/// Copyright (c) 2009-2010 Cristian Dittamo (dittamo@di.unipi.it)
/// 
/// The use and distribution terms for this software are 
/// contained in the file named license.txt, which can be found 
/// in the root of this distribution.
/// By using this software in any fashion, you are agreeing to 
/// be bound by the terms of this license.
///
/// You must not remove this notice, or any other, from this
/// software.
/// ------------------------------------------------------------
////////////////////////////////////////////////////////////////

using System;
using System.Collections.Generic;
using System.IO;
using System.Reflection;
using System.Runtime.InteropServices;
using CompileLib.InteropServices;
using CompileLib.InteropServices.NVIDIA;
using CompileLib.Translation.NVIDIA;
using StreamDefs;


namespace ModelsMapping
{
    /// <summary>
    /// Binary operators for combining partial results of a data-parallel
    /// reduction.
    /// </summary>
    public enum DPOper
    {
        Add = 1,   // combine partial results by summation
        Prod = 2   // combine partial results by multiplication
    }

    /// <summary>
    /// Marker interface for a model of parallel computation over elements of
    /// type <typeparamref name="T"/>. Currently empty; implementations expose
    /// their own entry points (e.g. <c>Map</c>).
    /// NOTE(review): conventionally this would be named <c>IModel&lt;T&gt;</c>,
    /// but renaming would break existing implementors.
    /// </summary>
    public interface Model<T>
    {
        //void Run();
    }

    /// <summary>
    /// Read-only wrapper pairing a two-dimensional array with its logical
    /// X/Y extents. The array is stored by reference (not copied) and the
    /// supplied extents are not validated against the array's actual shape.
    /// </summary>
    /// <typeparam name="T">Element type of the wrapped matrix.</typeparam>
    public class Data<T>
    {
        /// <summary>
        /// Wraps <paramref name="array"/> together with the caller-supplied
        /// extents <paramref name="x"/> and <paramref name="y"/>.
        /// </summary>
        public Data(int x, int y, T[,] array)
        {
            X = x;
            Y = y;
            Matrix = array;
        }

        /// <summary>Extent along the first dimension, as supplied to the constructor.</summary>
        public int X { get; private set; }

        /// <summary>Extent along the second dimension, as supplied to the constructor.</summary>
        public int Y { get; private set; }

        /// <summary>The wrapped matrix (same reference passed to the constructor).</summary>
        public T[,] Matrix { get; private set; }
    }

    /// <summary>
    /// Read-only triple of two input values and one output value, all of the
    /// same type. Values are captured at construction and never change.
    /// NOTE(review): the name collides with <see cref="System.Tuple"/> on
    /// .NET 4+; kept for compatibility with existing callers.
    /// </summary>
    /// <typeparam name="T">Type shared by both inputs and the output.</typeparam>
    public class Tuple<T>
    {
        private readonly T _input1;
        private readonly T _input2;
        private readonly T _output;

        /// <summary>Captures the two inputs and the output.</summary>
        public Tuple(T inp1, T inp2, T outp)
        {
            _input1 = inp1;
            _input2 = inp2;
            _output = outp;
        }

        /// <summary>First input value.</summary>
        public T Input1 { get { return _input1; } }

        /// <summary>Second input value.</summary>
        public T Input2 { get { return _input2; } }

        /// <summary>Output value.</summary>
        public T Output { get { return _output; } }
    }

    /// <summary>
    /// Reflection and iteration helpers for data-parallel execution.
    /// </summary>
    public class DataParallelIterator
    {
        /// <summary>
        /// Uses reflection to print, to the console, the name, rank, element
        /// type and per-dimension lengths of every array-typed field declared
        /// on the runtime type of this instance (public and non-public).
        /// </summary>
        public void GetField()
        {
            Type t = this.GetType();
            FieldInfo[] fis = t.GetFields(BindingFlags.Instance | BindingFlags.NonPublic | BindingFlags.Public);
            foreach (FieldInfo fi in fis)
            {
                if (!fi.FieldType.IsArray)
                    continue;

                int rank = fi.FieldType.GetArrayRank();
                Console.Write("Array - name:{0} rank:{1} type:{2}", fi.Name, rank, fi.FieldType.GetElementType());

                // FIX: the original cast-and-dereferenced the field value without
                // a null check, throwing NullReferenceException for unset fields.
                Array value = (Array)fi.GetValue(this);
                if (value == null)
                {
                    Console.WriteLine(" length:<null>");
                    continue;
                }

                Console.Write(" length:");
                if (rank == 1)
                    Console.WriteLine("{0}", value.GetLength(0));
                else
                {
                    for (int i = 0; i < rank; i++)
                        Console.Write("{0} ", value.GetLength(i));
                    Console.WriteLine();
                }
            }
        }

        /// <summary>
        /// Yields the arithmetic sequence init, init+increment, ... while the
        /// value is strictly less than <paramref name="end"/>. Empty when
        /// <paramref name="init"/> &gt;= <paramref name="end"/>.
        /// </summary>
        /// <exception cref="ArgumentOutOfRangeException">
        /// Thrown (on first enumeration) when <paramref name="increment"/> is
        /// not positive — the original looped forever in that case.
        /// </exception>
        public static IEnumerable<int> ParallelIterator(int init, int end, int increment)
        {
            if (increment <= 0)
                throw new ArgumentOutOfRangeException("increment", "increment must be positive");
            for (int i = init; i < end; i += increment)
                yield return i;
        }
    }

    /// <summary>
    /// CPU data-parallel execution model: splits input/output streams into one
    /// partition per logical processor, invokes a user-supplied compute method
    /// (found by reflection on the wrapped instance) once per partition, then
    /// merges the partial outputs back into the caller's output stream.
    /// </summary>
    public class DataParallel : Model<int>
    {
        // Number of partitions: one per logical processor.
        protected int _cores;
        // Per-core argument arrays handed to the reflected compute method.
        private object[][] _parameters;

        protected OutputStream<int> _output;
        protected Object _currentInstance;
        protected MethodInfo _compute;

        /// <summary>
        /// Wraps <paramref name="ob"/>, whose public methods will be looked up
        /// by name in <see cref="Map"/>.
        /// </summary>
        public DataParallel(Object ob)
        {
            _currentInstance = ob;
            _cores = System.Environment.ProcessorCount;
            _parameters = new object[_cores][];
        }

        /// <summary>
        /// Splits every input stream (and the output stream, if any) into
        /// <c>_cores</c> partitions, invokes <paramref name="method_name"/> on
        /// the wrapped instance once per partition — each call receiving its
        /// input partitions followed by its output partition — and finally
        /// copies the partial outputs back into <paramref name="outArg"/>.
        /// </summary>
        /// <param name="method_name">Public method name on the wrapped instance.</param>
        /// <param name="outArg">Destination stream; may be null for compute
        /// methods that produce no output.</param>
        /// <param name="inputArgList">Zero or more input streams.</param>
        public virtual void Map(string method_name, StreamDefs.OutputStream<int> outArg, params StreamDefs.InputStream<int>[] inputArgList)
        {
            _output = outArg;

            if (inputArgList.Length > 0 || _output != null)
            {
                // FIX: the original allocated the per-core rows only when there
                // was at least one input stream, so an output-only call crashed
                // with a NullReferenceException when storing the output partition.
                for (int c = 0; c < _cores; c++)
                    _parameters[c] = new object[inputArgList.Length + 1]; // + 1 slot for the output partition

                // Scatter: partition each input stream across the cores.
                // All rows are equal in length.
                for (int j = 0; j < inputArgList.Length; j++)
                {
                    List<InputStream<int>> inputs = inputArgList[j].Split(_cores);
                    for (int c = 0; c < _cores; c++)
                        _parameters[c][j] = inputs[c];
                }

                // Output partition always occupies the last slot.
                if (_output != null)
                {
                    List<OutputStream<int>> outs = _output.Split(_cores);
                    for (int c = 0; c < _cores; c++)
                        _parameters[c][inputArgList.Length] = outs[c];
                }
            }
            else
            {
                // No arguments at all: Invoke(obj, null) means a parameterless call.
                for (int c = 0; c < _cores; c++)
                    _parameters[c] = null;
            }

            _compute = _currentInstance.GetType().GetMethod(method_name);
            if (_compute == null)
                throw new MissingMethodException(_currentInstance.GetType().FullName, method_name);

            // TODO: make it multi-threaded with a barrier at the end
            for (int i = 0; i < _cores; i++)
                _compute.Invoke(_currentInstance, _parameters[i]);

            // Gather: merge partial outputs back into the caller's stream.
            // FIX: the original ran this unconditionally and dereferenced both
            // the (null) parameter rows and _output when no output was supplied.
            if (_output != null)
            {
                for (int c = 0; c < _cores; c++)
                {
                    OutputStream<int> part = (OutputStream<int>)_parameters[c][inputArgList.Length];
                    while (part.MoveNext())
                    {
                        if (_output.MoveNext())
                            _output.Current = part.Current;
                    }
                }
            }
        }
    }

    /// <summary>
    /// GPU variant of <see cref="DataParallel"/>: translates the compute method
    /// to a CUDA kernel (cached as a PTX file), copies the input streams to the
    /// device, launches the kernel, and copies the result back into host memory.
    /// </summary>
    public class GPUDataParallel : DataParallel
    {
        #region Fields
        MetaCUDACompute mcomp;          // reflected kernel metadata for the wrapped type
        MetaCUDAKernel kers;            // kernel selected by Map()
        GPUDevice device;
        IntPtr job1;                    // native CUDA job handle; IntPtr.Zero until Map() runs
        GPUFunction fun1;
        GPUContext ctx1;
        GPUModule mod1;

        int[] ptrInput;                 // device pointers, one per input stream
        GCHandle[] gcHdInput;           // pinned handles for the host-side input arrays

        int devices;
        #endregion

        /// <summary>
        /// Reflects over <paramref name="ob"/>'s type to find its kernels and
        /// binds to the first CUDA device.
        /// </summary>
        /// <exception cref="ArgumentOutOfRangeException">No CUDA device available.
        /// NOTE(review): an InvalidOperationException would be more appropriate,
        /// but the type is kept so existing catch clauses still match.</exception>
        public GPUDataParallel(Object ob)
            : base(ob)
        {
            // Get reflected info from ob's type
            mcomp = (MetaCUDACompute)MetaCUDACompiler.FindKernels(_currentInstance.GetType());
            devices = 0;
            CUDA.GetDevices(ref devices);
            if (devices == 0)
                throw new ArgumentOutOfRangeException("ERROR: no devices available");

            //FIX-ME: should consider other devices (if available)
            device = new GPUDevice(0);
        }

        /// <summary>
        /// Runs the kernel named <paramref name="method_name"/> on the GPU:
        /// writes the PTX translation on first use, allocates and copies the
        /// pinned input buffers to the device, launches a 1x1x1 grid, and
        /// copies the output buffer back to the host.
        /// </summary>
        /// <exception cref="ArgumentNullException"><paramref name="outArg"/> is null.</exception>
        public override void Map(string method_name, StreamDefs.OutputStream<int> outArg, params StreamDefs.InputStream<int>[] inputArgList)
        {
            kers = mcomp.GetKernel(method_name);

            // Translate the kernel to PTX once and cache it on disk.
            if (!File.Exists(mcomp.ptxFile.FileName))
            {
                mcomp.ptxFile.Body = kers.EvalBody(outArg, inputArgList);
                mcomp.ptxFile.Write();
            }

            //###########################  Initialization function calls  ##############################################

            job1 = CUDA.InitEnv(new GPUDevice(0), kers.NumOfArguments(), 1);
            ctx1 = CUDA.createContext(job1);
            mod1 = CUDA.LoadBin(job1, mcomp.ptxFile.FileName);
            fun1 = CUDA.LoadFunc(job1, kers.Name);

            //##########################################################################################################

            GCHandle hOutputHandle = default(GCHandle);
            try
            {
                if (inputArgList.Length > 0)
                {
                    ptrInput = new int[inputArgList.Length];
                    gcHdInput = new GCHandle[inputArgList.Length];

                    for (int j = 0; j < inputArgList.Length; j++)
                    {
                        int mem_size_input = InputStream<int>.GetSize() * inputArgList[j].Count();

                        // Pin the host array so the native copy sees a stable address.
                        gcHdInput[j] = GCHandle.Alloc(inputArgList[j].ToArray(), GCHandleType.Pinned);
                        ptrInput[j] = CUDA.MAllocOnGPU(job1, mem_size_input);

                        CUDA.MCpyCPU2GPU(job1, ptrInput[j], gcHdInput[j].AddrOfPinnedObject(), mem_size_input);
                    }
                }

                // Output argument is always the last one
                if (outArg == null)
                    throw new ArgumentNullException("outArg");

                int[] hOutput = outArg.ToArray();
                int mem_size_output = OutputStream<int>.GetSize() * outArg.Count();

                int dOutput = CUDA.MAllocOnGPU(job1, mem_size_output);
                hOutputHandle = GCHandle.Alloc(hOutput, GCHandleType.Pinned);
                IntPtr outPtr = hOutputHandle.AddrOfPinnedObject();

                // setup execution parameters: device output pointer first,
                // then one device pointer per input stream.
                int offset = 0;
                offset = CUDA.SetParamV(job1, fun1, offset, dOutput);

                for (int j = 0; j < inputArgList.Length; j++)
                    offset = CUDA.SetParamV(job1, fun1, offset, ptrInput[j]);

                CUDA.SetParamSize(fun1, offset);

                CUDA.SetBlockShape(fun1, 1, 1, 1);

                CUDA.LaunchGrid(fun1, 1, 1);

                CUDA.MCpyGPU2CPU(job1, outPtr, dOutput, mem_size_output);
            }
            finally
            {
                // FIX: the original never freed any pinned GCHandle, leaking
                // pinned host memory on every call (and on any failure path).
                if (hOutputHandle.IsAllocated)
                    hOutputHandle.Free();
                if (gcHdInput != null)
                    for (int j = 0; j < gcHdInput.Length; j++)
                        if (gcHdInput[j].IsAllocated)
                            gcHdInput[j].Free();
            }
        }

        ~GPUDataParallel()
        {
            // FIX: guard against tearing down an environment that was never
            // created (job1 stays IntPtr.Zero until Map() is called).
            if (job1 != IntPtr.Zero)
                CUDA.Exit(job1);
        }
    }

    //public class BSP
    //{
        
    //}


}
