package concurrent

import (
	"container/heap"
	"runtime"
	"sync"

	"gitee.com/wick.zt/gotils/itertools"
)

var (
	// ResultBufSize is the capacity of the result channel returned by
	// `Run` and `RunStable`. The buffer absorbs the speed difference
	// between `Iterator.Next`, `ProcessFunc` and result handling
	// (consuming of `chan Result`).
	ResultBufSize int = 1024
)

// Result is the output type of the concurrent runner which contains
// both the origin input `Arg` for further logging or processing and
// the processed `Val`.
type Result struct {
	// Arg is the input argument that was passed to the `ProcessFunc`.
	// It is nil when the result reports an iteration error instead of
	// a processing error.
	Arg interface{}
	// Val is the processed result from the `ProcessFunc`.
	Val interface{}
	// Err is the error reported either by the iterator (`Iterator.Err`)
	// or by the `ProcessFunc` when processing `Arg`.
	Err error
}

// ProcessFunc is a general function that takes an input argument and
// produces a result value or reports an error.
type ProcessFunc func(arg interface{}) (val interface{}, err error)

// Run iterates the input data concurrently with at most n worker
// goroutines and returns a buffered channel of the processed results.
//
// Results are delivered in completion order, not input order; use
// `RunStable` when the input order must be preserved. Errors reported
// by `it.Err()` are forwarded as a `Result` with a nil `Arg`.
//
// When n < 1 the worker count defaults to `runtime.NumCPU()-1`, but
// never less than 1. If wg is non-nil the caller must have called
// `wg.Add(1)` beforehand; `wg.Done` fires after all results have been
// sent and the returned channel has been closed.
func Run(wg *sync.WaitGroup, it itertools.Iterator, pf ProcessFunc, n int) <-chan Result {
	if n < 1 {
		// Leave one CPU for the result consumer, but never drop below
		// one worker: on a single-CPU machine n would become 0, making
		// sem unbuffered and deadlocking the first `sem <-` below
		// (the matching receiver is only spawned after the send).
		if n = runtime.NumCPU() - 1; n < 1 {
			n = 1
		}
	}
	rc := make(chan Result, ResultBufSize)
	go func() {
		if wg != nil {
			defer wg.Done()
		}
		defer close(rc)
		wgWork := &sync.WaitGroup{}
		// sem bounds the number of in-flight workers to n.
		sem := make(chan struct{}, n)
		for it.Next() {
			if err := it.Err(); err != nil {
				// Iteration errors are reported inline; no worker is
				// spawned and no semaphore slot is consumed.
				rc <- Result{nil, nil, err}
				continue
			}
			sem <- struct{}{} // acquire a worker slot
			wgWork.Add(1)
			go func(arg interface{}) {
				defer wgWork.Done()
				val, err := pf(arg)
				rc <- Result{arg, val, err}
				<-sem // release the slot
			}(it.Val())
		}
		close(sem)
		wgWork.Wait()
	}()
	return rc
}

// indexedResult pairs a Result with the zero-based position of its
// input in the iteration sequence, so `RunStable` can re-emit results
// in their original order.
type indexedResult struct {
	// Idx is the zero-based sequence number assigned by `RunStable`.
	Idx int
	Result
}

// ascHeap is a min-heap of indexedResult ordered by ascending Idx;
// the result with the smallest sequence number sits on top.
type ascHeap []indexedResult

// Len implements the `sort.Interface`.
func (h ascHeap) Len() int {
	return len(h)
}

// Less implements the `sort.Interface`.
func (h ascHeap) Less(a, b int) bool {
	return h[a].Idx < h[b].Idx
}

// Swap implements the `sort.Interface`.
func (h ascHeap) Swap(a, b int) {
	h[a], h[b] = h[b], h[a]
}

// Push implements the `container/heap.Interface`.
func (h *ascHeap) Push(x interface{}) {
	*h = append(*h, x.(indexedResult))
}

// Pop implements the `container/heap.Interface`.
func (h *ascHeap) Pop() interface{} {
	old := *h
	last := old.Len() - 1
	item := old[last]
	*h = old[:last]
	return item
}

// RunStable iterates the input data concurrently and keeps the output
// results in the sending (iteration) order.
//
// Each input is tagged with its sequence number; a dedicated sorter
// goroutine buffers out-of-order completions in a min-heap and emits
// them strictly ascending. Errors reported by `it.Err()` are forwarded
// in sequence as a `Result` with a nil `Arg`.
//
// When n < 1 the worker count defaults to `runtime.NumCPU()-1`, but
// never less than 1. If wg is non-nil the caller must have called
// `wg.Add(1)` beforehand; `wg.Done` fires after all results have been
// sent and the returned channel has been closed.
func RunStable(wg *sync.WaitGroup, it itertools.Iterator, pf ProcessFunc, n int) <-chan Result {
	if n < 1 {
		// Leave one CPU for the result consumer, but never drop below
		// one worker: on a single-CPU machine n would become 0, making
		// sem unbuffered and deadlocking the first `sem <-` below
		// (the matching receiver is only spawned after the send).
		if n = runtime.NumCPU() - 1; n < 1 {
			n = 1
		}
	}
	hc := make(chan indexedResult, n)
	rc := make(chan Result, ResultBufSize)
	go func() {
		if wg != nil {
			defer wg.Done()
		}
		defer close(rc)
		wait := make(chan struct{})
		// sort the result ascendingly by `Idx`: buffer completions in a
		// min-heap and drain while the top matches the next expected index.
		go func() {
			cur := 0
			h := make(ascHeap, 0)
			for v := range hc {
				heap.Push(&h, v)
				for h.Len() > 0 && h[0].Idx == cur {
					x := heap.Pop(&h).(indexedResult)
					rc <- x.Result
					cur++
				}
			}
			wait <- struct{}{}
			close(wait)
		}()
		// spawn worker goroutines just like the unstable version does.
		wgWork := &sync.WaitGroup{}
		sem := make(chan struct{}, n)
		for i := 0; it.Next(); i++ {
			if err := it.Err(); err != nil {
				// Iteration errors keep their sequence number so the
				// sorter does not stall waiting for a missing index.
				hc <- indexedResult{i, Result{nil, nil, err}}
				continue
			}
			wgWork.Add(1)
			sem <- struct{}{} // acquire a worker slot
			go func(idx int, arg interface{}) {
				defer wgWork.Done()
				val, err := pf(arg)
				hc <- indexedResult{idx, Result{arg, val, err}}
				<-sem // release the slot
			}(i, it.Val())
		}
		close(sem)
		wgWork.Wait()
		close(hc)
		<-wait // close heap chan "hc" first to end sort loop
	}()
	return rc
}
