package mapreduce

import (
	"context"
	"gitee.com/fishjam/go-library/flog"
	"gitee.com/fishjam/go-library/utils"
	"golang.org/x/exp/maps"
	"golang.org/x/exp/slices"
	"sync"
	"sync/atomic"
)

/***********************************************************************************************************************
* 模仿 Java Lambda Stream 的 stream.map 和 parallelStream.map
* TODO:
*   1.如果 map/ParallelMap 结果的数量很大,是否会出现内存问题?可以考虑通过回调或 chan 的方式处理?
*   2.加入统计信息 Stats
*
// TODO: 这里对 []string 的结果进行  Equal/EqualValues, 在不等的时候,无法显示详细的值, 需要提供 msgAndArgs 显示
// github.com/c4pt0r/gomapreduce
// https://github.com/madiks/goMapReduce/  <== 2015 年前提交的
*
***********************************************************************************************************************/

// MapperFunc transforms one input item of type T into a result of type R.
// It returns the result, an OperationType that controls whether processing
// continues (Continue) or stops early, and any error encountered.
type MapperFunc[T any, R any] func(context.Context, T) (R, OperationType, error)

// ResultsMap maps each input item's position index (0-based) to its OutputItem.
// Because a Map operation may stop early (Stop), and asynchronous processing
// yields results in a nondeterministic order, the number of entries may be
// smaller than the number of inputs. Failed items are included together with
// their errors.
type ResultsMap[T any, R any] map[int64]*OutputItem[T, R]

// ConvertResult 将 map[int64]*OutputItem[R] 的结果转换成 数组的格式,如果完全正确,则数组的个数和输入数组的个数一样
// ConvertResult flattens the map into slices ordered by input index. When the
// whole operation succeeded, the result slice has the same length as the input
// slice. The returned OperationType is Continue unless some item carried a
// different OpType, in which case that value is reported.
func (r *ResultsMap[T, R]) ConvertResult() ([]R, []error, OperationType) {
	opType := Continue
	results := make([]R, 0, len(*r))
	errs := make([]error, 0, len(*r))

	// Collect and sort all keys so results come out in input order.
	ids := maps.Keys(*r)
	slices.Sort(ids)

	// BUG FIX: the previous version ranged over slice positions (idx) and then
	// looked up (*r)[int64(idx)] — i.e. it used the position as the map key
	// instead of the sorted key ids[idx]. That only worked by accident when the
	// keys were exactly 0..n-1; with non-contiguous keys or a non-zero start
	// index (see StartWithChan) it dereferenced a nil entry and panicked.
	for _, id := range ids {
		item := (*r)[id]
		results = append(results, item.Result)
		errs = append(errs, item.Err)
		if item.OpType != Continue {
			opType = item.OpType
		}
	}
	return results, errs, opType
}

// Map 串行的 Map 函数
func Map[T any, R any](ctx context.Context, inputs []T, mapper MapperFunc[T, R]) ResultsMap[T, R] {
	results := make(ResultsMap[T, R])
	for idx, item := range inputs {
		r, opType, err := mapper(ctx, item)
		results[int64(idx)] = &OutputItem[T, R]{Index: int64(idx), OpType: opType, Item: item, Result: r, Err: err}
		if Continue != opType {
			flog.Debugf("mapper fail: item.index=%d, item=%v", idx, item)
			break
		}
	}
	return results
}

// ParallelMapImpl runs mapper functions concurrently over a stream of input
// items, fanning work out to worker goroutines via chInput and collecting
// the results on chOutput.
type ParallelMapImpl[T any, R any] struct {
	ctx         context.Context
	concurrency int                    // requested number of worker goroutines
	name        string                 // label used in log messages
	wg          sync.WaitGroup         // tracks running workers
	chInput     chan *InputItem[T]     // producer -> workers
	chOutput    chan *OutputItem[T, R] // workers -> result reader
	willStop    atomic.Bool            // set to true to request an early exit of all goroutines
}

// NewParallelMapImpl creates a parallel mapper labelled name that will run up
// to concurrency worker goroutines. Both channels are buffered to concurrency
// so the producer can stay slightly ahead of the workers.
//
// Note: the previous explicit willStop.Store(false) was removed — the zero
// value of atomic.Bool is already false, so the field needs no initialization.
func NewParallelMapImpl[T any, R any](ctx context.Context, concurrency int, name string) *ParallelMapImpl[T, R] {
	return &ParallelMapImpl[T, R]{
		ctx:         ctx,
		name:        name,
		concurrency: concurrency,
		chInput:     make(chan *InputItem[T], concurrency), // NOTE: an unbuffered chan would likely also work here
		chOutput:    make(chan *OutputItem[T, R], concurrency),
	}
}

// Close requests an early stop of all goroutines. The channels are not closed
// here: chInput is closed by the producer and chOutput by the monitor
// goroutine once every worker has finished. Always returns nil.
func (m *ParallelMapImpl[T, R]) Close() error {
	m.willStop.Store(true)
	return nil
}

// workerFunc is the body of one worker goroutine. It drains chInput, applies
// mapper to each item, and publishes every outcome (including failures) on
// chOutput so the caller can decide what to do with it. When the mapper
// returns anything other than Continue, willStop is raised and this worker
// exits, which in turn makes the remaining workers stop early as well.
func (m *ParallelMapImpl[T, R]) workerFunc(idx int, mapper MapperFunc[T, R]) {
	defer m.wg.Done()

	for in := range m.chInput {
		flog.Debugf("worker[%s-%d]: read input: %+v", m.name, idx, in)

		// Another goroutine returned Stop and asked everyone to quit early.
		if m.willStop.Load() {
			break
		}

		result, op, err := mapper(m.ctx, in.item)

		// Publish the outcome even on error; the caller inspects it.
		flog.Debugf("worker[%s-%d] write item.index=%d, item=%v, result=%+v", m.name, idx, in.index, in.item, result)
		m.chOutput <- &OutputItem[T, R]{
			Index:  in.index,
			Item:   in.item,
			OpType: op,
			Result: result,
			Err:    err,
		}

		if op != Continue {
			m.willStop.Store(true)
			flog.Debugf("worker[%s-%d] mapper fail: item.index=%d, item=%v", m.name, idx, in.index, in.item)
			break
		}
	}
}

// startWorkerAndMonitor launches realConcurrency worker goroutines plus one
// monitor goroutine that waits for all workers to finish and then closes
// chOutput, unblocking whoever is reading the results.
func (m *ParallelMapImpl[T, R]) startWorkerAndMonitor(realConcurrency int, mapper MapperFunc[T, R]) {
	for workerID := 0; workerID < realConcurrency; workerID++ {
		m.wg.Add(1)
		// workerID is passed as an argument so every goroutine gets its own
		// copy (required before Go 1.22 loop-variable semantics).
		go func(id int) {
			flog.Infof("worker[%s-%d]: enter", m.name, id)
			m.workerFunc(id, mapper)
			flog.Infof("worker[%s-%d]: leave", m.name, id)
		}(workerID)
	}

	go func() {
		flog.Infof("Wait worker done: %s enter", m.name)
		m.wg.Wait()
		// Closing chOutput lets the result-reading goroutine leave its range loop.
		close(m.chOutput)
		flog.Infof("Wait worker done: %s leave", m.name)
	}()
}

// Start feeds inputs into the worker pool and returns the channel on which
// the mapped results will appear. The worker count is capped at len(inputs)
// so no goroutine sits idle. The producer goroutine stops pushing as soon as
// an early stop is requested, then closes chInput so the workers drain and exit.
func (m *ParallelMapImpl[T, R]) Start(inputs []T, mapper MapperFunc[T, R]) chan *OutputItem[T, R] {
	workerCount := utils.Min(len(inputs), m.concurrency)
	m.startWorkerAndMonitor(workerCount, mapper)

	// Producer: push each input item together with its index.
	go func() {
		flog.Infof("producer:%s enter", m.name)
		defer close(m.chInput)
		for i, input := range inputs {
			if m.willStop.Load() {
				break
			}
			flog.Debugf("push job idx=%d", i)
			m.chInput <- &InputItem[T]{
				index: int64(i),
				item:  input,
			}
		}
		flog.Infof("producer:%s leave", m.name)
	}()

	return m.chOutput
}

// StartWithChan feeds items read from chData into the worker pool and returns
// the channel on which the mapped results will appear. Unlike Start, the total
// number of inputs is unknown here, so all m.concurrency workers are started.
//
// NOTE(review): idx is incremented before the first push, so the first item is
// assigned index startIndex+1 rather than startIndex — confirm this off-by-one
// is intended by callers of StreamMap.
func (m *ParallelMapImpl[T, R]) StartWithChan(startIndex int64, chData chan T, mapper MapperFunc[T, R]) chan *OutputItem[T, R] {
	m.startWorkerAndMonitor(m.concurrency, mapper)

	// start producer
	go func() {
		flog.Infof("producer:%s enter", m.name)
		defer close(m.chInput)
		idx := startIndex
		for inData := range chData {
			// Stop pushing once some worker has requested an early exit.
			if m.willStop.Load() {
				break
			}
			idx++
			flog.Debugf("push job idx=%d", idx)
			m.chInput <- &InputItem[T]{
				index: idx,
				item:  inData,
			}
		}
		flog.Infof("producer:%s leave", m.name)
	}()

	return m.chOutput
}

// ReadMapperResult drains chOutput until it is closed by the monitor
// goroutine and returns the collected items keyed by their original input
// index.
func (m *ParallelMapImpl[T, R]) ReadMapperResult(chOutput chan *OutputItem[T, R]) ResultsMap[T, R] {
	collected := make(ResultsMap[T, R])
	flog.Debugf("now will wait for result")
	order := 1 // arrival sequence of the asynchronous results (logging only)
	for item := range chOutput {
		flog.Debugf("receive result , receiveOrder=%d, result=%+v", order, item)
		collected[item.Index] = item // keyed by original input index
		order++
	}
	return collected
}

// ParallelMap maps inputs concurrently, spinning up 2 + concurrency
// goroutines (producer, monitor, and workers) much like a goroutine pool, and
// blocks until every result has been collected.
func ParallelMap[T any, R any](ctx context.Context, inputs []T, concurrency int, name string, mapper MapperFunc[T, R]) ResultsMap[T, R] {
	impl := NewParallelMapImpl[T, R](ctx, concurrency, name)
	return impl.ReadMapperResult(impl.Start(inputs, mapper))
}

// StreamMap maps an unbounded stream, goroutine-pool style: it reads items
// from chInput, processes them with concurrency workers, and returns the
// channel carrying the results.
func StreamMap[T any, R any](ctx context.Context, concurrency int, name string,
	startIndex int64, chInput chan T, mapper MapperFunc[T, R]) chan *OutputItem[T, R] {
	impl := NewParallelMapImpl[T, R](ctx, concurrency, name)
	return impl.StartWithChan(startIndex, chInput, mapper)
}
