package org.softee.functional;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import org.softee.functional.exception.ParallelException;
import org.softee.functional.helper.Preconditions;
import org.softee.functional.operation.Transposal;

public abstract class AbstractParallelFunctional<F> extends AbstractFunctional<F> implements ParallelFunctional<F> {
    protected ExecutionContext executionContext;

    /**
     * Creates a parallel functional over the given data with a default
     * {@link ExecutionContext} (lazily sized from the available processors).
     *
     * @param data the backing data collection
     */
    public AbstractParallelFunctional(Collection<F> data) {
        this(data, new ExecutionContext());
    }

    /**
     * Creates a parallel functional over the given data with an explicit execution context.
     *
     * @param data the backing data collection
     * @param executionContext controls thread count, task decomposition and the executor
     */
    public AbstractParallelFunctional(Collection<F> data, ExecutionContext executionContext) {
        super(data);
        this.executionContext = executionContext;
    }

    /**
     * @return a reference to this (already parallel)
     */
    @Override
    public ParallelFunctional<F> parallel() {
        return this;
    }

    /**
     * @return a sequential functional over the same data
     */
    @Override
    public Functional<F> sequential() {
        return new Sequential<F>(data);
    }

    /**
     * @param threadFactor multiplier applied to the processor count when sizing the
     *        default thread pool; must be positive
     * @return a functional whose execution context uses the given thread factor
     */
    @Override
    public ParallelFunctional<F> withThreadFactor(float threadFactor) {
        Preconditions.assertArgument(threadFactor > 0);
        return withExecutionContext(executionContext.withThreadFactor(threadFactor));
    }

    /**
     * @param decompositionFactor number of tasks to generate per execution thread; must be positive
     * @return a functional whose execution context uses the given decomposition factor
     */
    @Override
    public ParallelFunctional<F> withDecompositionFactor(float decompositionFactor) {
        /* Validate like withThreadFactor does: a non-positive factor drives the segment
         * count in ExecutionContext.split() to zero, causing a division by zero there. */
        Preconditions.assertArgument(decompositionFactor > 0);
        return withExecutionContext(executionContext.withDecompositionFactor(decompositionFactor));
    }

    /**
     * @param executor the executor to run parallel tasks on
     * @return a functional whose execution context uses the given executor
     */
    @Override
    public ParallelFunctional<F> using(Executor executor) {
        return withExecutionContext(executionContext.withExecutor(executor));
    }

    /**
     * Cancels all outstanding futures (interrupting running tasks) and rethrows the
     * failure wrapped in a {@link ParallelException}.
     *
     * @param t the failure that triggered the abort
     * @param futures the tasks to cancel
     * @throws ParallelException always, wrapping {@code t}
     */
    protected void abort(Throwable t, Collection<? extends Future<?>> futures) {
        for (Future<?> future : futures) {
            future.cancel(true);
        }
        throw new ParallelException(t);
    }

    /**
     * Applies the given transposal to the data via {@code doTranspose}.
     * TODO Make transposals parallelizable
     */
    @Override
    public Functional<F> transpose(Transposal<F> transposal) {
        return withData(doTranspose(transposal, data));
    }

    /** @return a parallel functional of the same concrete type over the given data */
    protected abstract <T> ParallelFunctional<T> withData(Collection<T> data);

    /** @return a parallel functional of the same concrete type using the given execution context */
    protected abstract ParallelFunctional<F> withExecutionContext(ExecutionContext executionContext);

    /**
     * Effectively immutable parallel execution context. The {@code with*} methods return
     * new instances; the getters lazily fall back to defaults on first access.
     * NOTE(review): the lazy initialization is unsynchronized, so a context should be
     * safely published (or confined to one thread) before being shared — confirm callers.
     */
    public static class ExecutionContext {
        public static final float DEFAULT_THREAD_FACTOR = 1.0f;
        public static final float DEFAULT_DECOMPOSITION_FACTOR = 2.0f;
        private Executor executor;
        private Float threadFactor;
        private Float decompositionFactor;
        private Integer threadCount;

        public ExecutionContext() {
            this(null, null, null);
        }

        private ExecutionContext(Executor executor, Float threadFactor, Float decompositionFactor) {
            this.executor = executor;
            this.threadFactor = threadFactor;
            this.decompositionFactor = decompositionFactor;
        }

        /**
         * @return a copy of this context with the given decomposition factor. The executor
         * and thread factor are preserved: the decomposition factor only affects how
         * {@link #split(Collection, int)} segments the data, never executor sizing.
         */
        public ExecutionContext withDecompositionFactor(float decompositionFactor) {
            return new ExecutionContext(executor, threadFactor, decompositionFactor);
        }

        /**
         * @return a copy of this context with the given thread factor. Any configured
         * executor is deliberately dropped, because the default executor's pool size is
         * derived from the thread factor and must be rebuilt to reflect the new value.
         */
        public ExecutionContext withThreadFactor(float threadFactor) {
            return new ExecutionContext(null, threadFactor, decompositionFactor);
        }

        /**
         * @return a copy of this context that runs tasks on the given executor, preserving
         * the configured thread and decomposition factors. (Resetting the factors here
         * would silently discard a previously configured decomposition.)
         */
        public ExecutionContext withExecutor(Executor executor) {
            return new ExecutionContext(executor, threadFactor, decompositionFactor);
        }

        /**
         * @return the thread factor, lazily defaulting to {@link #DEFAULT_THREAD_FACTOR}
         */
        public float getThreadFactor() {
            if (threadFactor == null) {
                threadFactor = DEFAULT_THREAD_FACTOR;
            }
            return threadFactor;
        }

        /**
         * @return a lazy initialized decomposition factor to use by this functional. The decomposition factor is
         * a measure of how much the overall task collection will be broken up.
         * A decomposition factor of one (1) means that exactly one task will be generated for each execution thread.
         * A decomposition factor of ten (10) means that ten tasks will be generated for each execution thread.
         * If tasks are completely homogeneous (with identical execution time) a decomposition factor of 1-2 should be chosen.
         * For tasks that are not homogeneous, a higher value should be chosen.
         */
        public float getDecompositionFactor() {
            if (decompositionFactor == null) {
                decompositionFactor = DEFAULT_DECOMPOSITION_FACTOR;
            }
            return decompositionFactor;
        }

       /**
        * @return a lazy initialized thread count to use by this Functional.
        * The thread count will default to (availableProcessors * threadFactor) + 1, which is optimal for
        * minimizing elapsed processing time of purely CPU bound tasks. A higher value should be chosen for IO bound tasks.
        */
       public int getThreadCount() {
           if (threadCount == null) {
               int cpuCores = Runtime.getRuntime().availableProcessors();
               // +1 keeps a spare thread so the pool stays busy while one thread stalls
               threadCount = (int)(cpuCores * getThreadFactor()) + 1;
           }
           return threadCount;
       }

       /**
        * @return a lazy initialized executor to use by this Functional; defaults to a
        * fixed thread pool of {@link #getThreadCount()} threads.
        * NOTE(review): the default pool is never shut down by this class; callers that
        * need orderly shutdown should supply their own executor via withExecutor().
        */
       public Executor getExecutor() {
           if (executor == null) {
               executor = Executors.newFixedThreadPool(getThreadCount());
           }
           return executor;
       }

       /**
        * Splits {@code data} into segments suitable for parallel processing. The segment
        * count is bounded both by ceil(dataSize / minimumSegmentSize) and by
        * (threadCount * decompositionFactor), but is never less than one. Segments are
        * subList views sharing storage with {@code data} when it is a List (otherwise
        * with an internal copy).
        *
        * @param data the data to split
        * @param minimumSegmentSize the smallest desired segment size; must be positive
        * @return collection of data collections (segments) suitable for parallel processing
        */
        public <F> Collection<Collection<F>> split(Collection<F> data, int minimumSegmentSize) {
           Preconditions.assertArgument(minimumSegmentSize > 0);
           List<F> dataList = (data instanceof List) ? (List<F>)data : new ArrayList<F>(data);

           int dataSize = data.size();
           /* max(1, ...) guards against a zero segment count — possible when
            * threadCount * decompositionFactor truncates to 0 — which previously caused
            * a division by zero below for non-empty data. */
           int segmentCount = Math.max(1, Math.min(
                   (dataSize + minimumSegmentSize - 1) / minimumSegmentSize,
                   (int) (getThreadCount() * getDecompositionFactor())
                   ));
           Collection<Collection<F>> segments = new ArrayList<Collection<F>>(segmentCount);
           int fromElement = 0;

           while (fromElement < dataSize) {
               int remainingSegments = segmentCount - segments.size();
               int remainingElements = dataSize - fromElement;
               int elementsInSegment = Math.max(minimumSegmentSize, remainingElements / remainingSegments);
               /* the following min() restriction may result in segments smaller than minimumSegmentSize.
                * Example: minimumSegmentSize=4; dataSize=5, threadCount*decompositionFactor=2
                * => segmentCount=2
                * => segment 0: elementsInSegment=4; segment 1: elementsInSegment=1
                */
               int toElement = Math.min(dataSize, fromElement + elementsInSegment);
               Collection<F> segment = dataList.subList(fromElement, toElement);
               segments.add(segment);
               fromElement = toElement;
           }

           return segments;
       }
    }
}
