package dataframe

import (
	"context"
	"fmt"
	"math"
	"strings"

	"gitee.com/lilihli/gota/series"
	"golang.org/x/sync/errgroup"
)

// AggregationType enumerates the aggregation methods understood by
// Groups.Aggregation.
type AggregationType int

// Values start at 1 so the zero value is not a valid aggregation. The trailing
// line comments are consumed by `stringer -linecomment` to generate String(),
// e.g. Aggregation_MAX.String() == "MAX" — do not edit them casually.
//go:generate stringer -type=AggregationType -linecomment
const (
	Aggregation_MAX    AggregationType = iota + 1 // MAX
	Aggregation_MIN                               // MIN
	Aggregation_MEAN                              // MEAN
	Aggregation_MEDIAN                            // MEDIAN
	Aggregation_STD                               // STD
	Aggregation_SUM                               // SUM
	Aggregation_COUNT                             // COUNT
)

// Groups : structure generated by groupby. It maps each distinct combination
// of group-by column values to the sub-DataFrame holding that group's rows.
type Groups struct {
	groups   map[string]DataFrame // group key ("_"-joined column values) -> rows of that group
	colnames []string             // names of the group-by columns
	coltypes []series.Type        // types of the group-by columns, parallel to colnames
}

// GetGroups returns the grouped data frames created by GroupBy, keyed by the
// "_"-joined group values. The map is the internal one, not a copy; callers
// must not mutate it.
func (g Groups) GetGroups() map[string]DataFrame {
	return g.groups
}

// Aggregation computes one aggregate per group for each requested column and
// returns a DataFrame with one row per group: first the group-by key columns,
// then one column per (colnames[i], typs[i]) pair named "<col>_<TYPE>".
// typs and colnames must have equal length. It reads the underlying elements
// directly to avoid a full Maps() conversion.
func (gps Groups) Aggregation(typs []AggregationType, colnames []string) DataFrame {
	// Preconditions.
	if gps.groups == nil {
		return DataFrame{Err: fmt.Errorf("Aggregation: input is nil")}
	}
	if len(typs) != len(colnames) {
		return DataFrame{Err: fmt.Errorf("Aggregation: len(typs) != len(colnames)")}
	}
	// Guard the empty case up front: the result-type probe below reads the
	// first aggregated element and would otherwise panic on zero groups.
	if len(gps.groups) == 0 {
		return DataFrame{Err: fmt.Errorf("Aggregation: no groups to aggregate")}
	}

	nGroups := len(gps.groups)
	resultCols := make([]series.Series, len(gps.colnames)+len(colnames))

	// Buffers for the group-by key columns, plus a null bitmap per column.
	keyCol := make([][]series.Element, len(gps.colnames))
	null := make([]*series.Bitmap, len(gps.colnames))
	for i := range gps.colnames {
		keyCol[i] = make([]series.Element, nGroups)
		null[i] = series.NewBitmap(nGroups)
	}

	// Buffers for the aggregated value columns.
	valueCol := make([][]series.Element, len(colnames))
	for i := range colnames {
		valueCol[i] = make([]series.Element, nGroups)
	}

	count := 0
	for _, df := range gps.groups {
		// 1. Group-by columns are constant within a group: take row 0.
		for i, c := range gps.colnames {
			e0, isNaN := df.Col(c).Elem(0)
			keyCol[i][count] = e0
			null[i].Set(count, isNaN)
		}
		// 2. Aggregate each requested column over the whole group.
		for i, c := range colnames {
			valueCol[i][count] = computeAggregation(df.Col(c), typs[i])
		}
		count++
	}

	// 3. Assemble the result columns.
	for i, col := range keyCol {
		resultCols[i] = series.NewWithNull(col, gps.coltypes[i], gps.colnames[i], null[i])
	}
	for i, col := range valueCol {
		// col[0] is safe: nGroups >= 1 was checked above.
		resultCols[len(gps.colnames)+i] = series.New(col, col[0].Type(), colnames[i]+"_"+typs[i].String())
	}

	return New(resultCols...)
}

// computeAggregation evaluates a single aggregation typ over s and returns the
// resulting element. Unknown types and failed computations yield a NaN element.
func computeAggregation(s series.Series, typ AggregationType) series.Element {
	nanElem := func() series.Element {
		e, _ := series.Floats([]float64{math.NaN()}).Elem(0)
		return e
	}
	// floatElem wraps a scalar into a one-element float series and extracts it,
	// falling back to NaN when the element is flagged as null.
	floatElem := func(v float64) series.Element {
		e, isNaN := series.Floats([]float64{v}).Elem(0)
		if isNaN {
			return nanElem()
		}
		return e
	}

	switch typ {
	case Aggregation_MAX:
		if e, err := s.Max(); err == nil {
			return e
		}
	case Aggregation_MIN:
		if e, err := s.Min(); err == nil {
			return e
		}
	case Aggregation_MEAN:
		return floatElem(s.Mean())
	case Aggregation_MEDIAN:
		return floatElem(s.Median())
	case Aggregation_STD:
		return floatElem(s.StdDev())
	case Aggregation_SUM:
		return floatElem(s.Sum())
	case Aggregation_COUNT:
		if e, isNaN := series.Ints([]int{s.Len()}).Elem(0); !isNaN {
			return e
		}
	}
	return nanElem()
}

// Arrange returns a new Groups whose per-group DataFrames are sorted by order.
// The receiver is untouched: the groups map is shallow-copied before sorting.
func (gps Groups) Arrange(order ...Order) *Groups {
	sorted := gps.copy()
	for name, df := range sorted.groups {
		sorted.groups[name] = df.Arrange(order...)
	}
	return sorted
}

// merge flattens all groups back into a single DataFrame. Row order follows
// map iteration order and is therefore unspecified.
func (gps Groups) merge() DataFrame {
	dfs := make([]DataFrame, 0, len(gps.groups))
	for _, df := range gps.groups {
		dfs = append(dfs, df)
	}
	return ConcatDataframe(dfs)
}

// ConcatDataframe vertically concatenates dfs row-wise into a single
// DataFrame. All frames are assumed to share the same column names and types,
// which are taken from the first frame — NOTE(review): callers appear to
// guarantee this (frames come from a GroupBy split); confirm before reuse.
func ConcatDataframe(dfs []DataFrame) DataFrame {
	// Total row count so all buffers can be sized once.
	size := 0
	for _, df := range dfs {
		size += df.Nrow()
	}

	var (
		se       [][]series.Element
		null     []*series.Bitmap
		coltypes []series.Type
		colnames []string
	)
	pos := 0
	for _, df := range dfs {
		if se == nil {
			// First frame fixes the schema and sizes the buffers.
			colnames = df.Names()
			coltypes = df.Types()
			se = make([][]series.Element, len(colnames))
			null = make([]*series.Bitmap, len(colnames))
			for i := range colnames {
				se[i] = make([]series.Element, size)
				null[i] = series.NewBitmap(size)
			}
		}
		for k, name := range df.Names() {
			s := df.Col(name)
			for i := 0; i < s.Len(); i++ {
				e, isNaN := s.Elem(i)
				se[k][pos+i] = e
				null[k].Set(pos+i, isNaN)
			}
		}
		pos += df.Nrow()
	}

	ss := make([]series.Series, len(se))
	for i := range se {
		// Bug fix: propagate the collected null bitmap into the result.
		// Previously series.New was used and the bitmaps were silently
		// discarded, losing all NaN information across the concat.
		ss[i] = series.NewWithNull(se[i], coltypes[i], colnames[i], null[i])
	}
	return New(ss...)
}

// Dataframe returns the group selected by groupValues, or the merge of all
// groups when no values are given. An unknown key yields the zero DataFrame.
func (gps Groups) Dataframe(groupValues ...string) DataFrame {
	if len(groupValues) == 0 {
		return gps.merge()
	}
	// Bug fix: GroupBy builds keys with strings.Join(values, "_"), but the
	// lookup key was built by plain concatenation, so multi-column group
	// lookups could never match. Build the key the same way GroupBy does.
	return gps.groups[strings.Join(groupValues, "_")]
}

// ForEach applies deal to every group sequentially and stops at the first
// error. Map iteration order is random, so the visit order is unspecified.
func (gps Groups) ForEach(deal func(groupName string, df DataFrame) error) error {
	for groupName, df := range gps.groups {
		if err := deal(groupName, df); err != nil {
			// %w (was %v) keeps the callback error inspectable with
			// errors.Is/errors.As; the rendered message is unchanged.
			return fmt.Errorf("deal failed, %w", err)
		}
	}
	return nil
}

// ForEachConcurrent runs deal once per group, each call in its own goroutine,
// and waits for all of them, returning the first non-nil error. Concurrency is
// unbounded (one goroutine per group); call g.SetLimit before g.Go if the
// number of groups can be large. Note deal does not observe the errgroup
// context, so remaining goroutines run to completion even after a failure.
func (gps Groups) ForEachConcurrent(deal func(groupName string, df DataFrame) error) error {
	g, _ := errgroup.WithContext(context.Background())

	for groupName, df := range gps.groups {
		// Copy the loop variables so each closure captures its own pair
		// (required for Go versions before 1.22).
		groupName, df := groupName, df
		g.Go(func() error {
			if err := deal(groupName, df); err != nil {
				// %w (was %v) keeps the callback error inspectable with
				// errors.Is/errors.As; the rendered message is unchanged.
				return fmt.Errorf("deal failed, %w", err)
			}
			return nil
		})
	}

	return g.Wait()
}

// DeepCopy duplicates the Groups, including a full copy of every per-group
// DataFrame (copy() alone shares the underlying frames with the receiver).
func (gps Groups) DeepCopy() *Groups {
	clone := gps.copy()
	for name, df := range gps.groups {
		clone.groups[name] = df.Copy()
	}
	return clone
}

// copy makes a shallow copy: the key-column metadata slices are duplicated,
// while the DataFrame values in the map are shared with the receiver.
func (gps Groups) copy() *Groups {
	colnames := make([]string, len(gps.colnames))
	copy(colnames, gps.colnames)

	coltypes := make([]series.Type, len(gps.coltypes))
	copy(coltypes, gps.coltypes)

	groups := make(map[string]DataFrame, len(gps.groups))
	for name, df := range gps.groups {
		groups[name] = df
	}

	return &Groups{groups: groups, colnames: colnames, coltypes: coltypes}
}

// Mutate applies df.Mutate per group using the series in m, keyed by group
// name. m must contain exactly one series for every group; otherwise an error
// is returned and the receiver is unchanged.
func (gps Groups) Mutate(m map[string]series.Series) (*Groups, error) {
	if len(m) != len(gps.groups) {
		return nil, fmt.Errorf("the length of the parameters does not match")
	}
	mutated := gps.copy()
	for name, df := range gps.groups {
		col, ok := m[name]
		if !ok {
			return nil, fmt.Errorf("group %s not exist", name)
		}
		mutated.groups[name] = df.Mutate(col)
	}
	return mutated, nil
}

// GroupBy Group dataframe by columns. Each distinct combination of values in
// groupColNames (joined with "_", NaN rendered as "NaN") becomes one group
// holding the matching rows as a sub-DataFrame.
func (df DataFrame) GroupBy(groupColNames ...string) (*Groups, error) {
	if len(groupColNames) == 0 {
		return nil, fmt.Errorf("groupColNames is empty")
	}
	colNames := df.Names()
	colTypes := df.Types()
	colIndexs, err := getIndex(colNames, groupColNames...)
	if err != nil {
		return nil, fmt.Errorf("getIndex failed, %v, %v, %v", colNames, groupColNames, err)
	}

	groupColTypes := make([]series.Type, len(groupColNames))
	for i, idx := range colIndexs {
		groupColTypes[i] = colTypes[idx]
	}

	// Bucket row indexes by their composite group key. The key-parts buffer is
	// reused across rows; strings.Join copies it into a fresh key each time.
	rowsByKey := make(map[string][]int)
	keyParts := make([]string, len(colIndexs))
	for row := 0; row < df.Nrow(); row++ {
		for k, col := range colIndexs {
			if e, isNaN := df.Elem(row, col); isNaN {
				keyParts[k] = "NaN"
			} else {
				keyParts[k] = e.String()
			}
		}
		key := strings.Join(keyParts, "_")
		rowsByKey[key] = append(rowsByKey[key], row)
	}

	groupDataFrame := make(map[string]DataFrame, len(rowsByKey))
	for key, rows := range rowsByKey {
		groupDataFrame[key] = df.Subset(rows)
	}

	return &Groups{groups: groupDataFrame, colnames: groupColNames, coltypes: groupColTypes}, nil
}

// getIndex returns, for each name in names, its position in ns. A name missing
// from ns yields an error; for duplicate entries in ns the last position wins.
func getIndex(ns []string, names ...string) ([]int, error) {
	pos := make(map[string]int, len(ns))
	for i, n := range ns {
		pos[n] = i
	}

	var indexs []int
	for _, name := range names {
		i, ok := pos[name]
		if !ok {
			return nil, fmt.Errorf("%s does not exist", name)
		}
		indexs = append(indexs, i)
	}
	return indexs, nil
}
