// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package colexechash

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/col/coldata"
	"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

// initHash, rehash, and finalizeHash work together to compute the hash value
// for an individual key tuple which represents a row's equality columns. Since
// this key is a tuple of various types, rehash is used to apply a
// transformation on the resulting hash value based on an element of the key of
// a specified type.
//
// The combination of these three functions actually defines a hashing function
// family - changing the initial hash value will produce a "different" hash
// function.
//
// We currently use the same hash functions used by go's maps.
// TODO(asubiotto): Once https://go-review.googlesource.com/c/go/+/155118/ is
// in, we should use the public API.

// DefaultInitHashValue is the default initValue to be used in initHash
// function.
const DefaultInitHashValue = 1

var (
	// The slices below each hold coldata.MaxBatchSize copies of the constant
	// 1 or 2. They are populated once in init and serve as read-only copy
	// sources for the fast paths of initHash and initHash64, letting those
	// functions fill a buckets slice via bulk copy instead of a scalar loop.
	uint32OneColumn []uint32
	uint32TwoColumn []uint32
	uint64OneColumn []uint64
	uint64TwoColumn []uint64
)

func init() {
	// Allocate and populate the constant-filled slices used as copy sources
	// by initHash and initHash64. All four share the same length, so a
	// single loop suffices.
	uint32OneColumn = make([]uint32, coldata.MaxBatchSize)
	uint32TwoColumn = make([]uint32, coldata.MaxBatchSize)
	uint64OneColumn = make([]uint64, coldata.MaxBatchSize)
	uint64TwoColumn = make([]uint64, coldata.MaxBatchSize)
	for i := 0; i < coldata.MaxBatchSize; i++ {
		uint32OneColumn[i] = 1
		uint32TwoColumn[i] = 2
		uint64OneColumn[i] = 1
		uint64TwoColumn[i] = 2
	}
}

// rehash takes an element of a key (tuple representing a row of equality
// column values) at a given column and computes a new hash by applying a
// transformation to the existing hash. This function is generated by execgen,
// so it doesn't appear in this file. Look at hash_utils_tmpl.go for the source
// code.
//
// initHash initializes the hash value of each key to its initial state for
// rehashing purposes.
// NOTE: initValue *must* be non-zero and nKeys is assumed to be positive.
func initHash(buckets []uint32, nKeys int, initValue uint32) {
	switch initValue {
	case 1:
		// Fast path: bulk-copy from the precomputed all-ones slice until the
		// first nKeys elements are filled.
		for filled := 0; filled < nKeys; {
			filled += copy(buckets[filled:], uint32OneColumn)
		}
	case 2:
		// Fast path: same as above but copying the all-twos slice.
		for filled := 0; filled < nKeys; {
			filled += copy(buckets[filled:], uint32TwoColumn)
		}
	default:
		// Hoist a single bounds check so the compiler can eliminate the
		// per-element checks inside the loop.
		_ = buckets[nKeys-1]
		for idx := 0; idx < nKeys; idx++ {
			//gcassert:bce
			buckets[idx] = initValue
		}
	}
}

// initHash64 is the 64-bit counterpart of initHash: it sets the first nKeys
// elements of buckets to initValue.
// NOTE: initValue *must* be non-zero and nKeys is assumed to be positive.
func initHash64(buckets []uint64, nKeys int, initValue uint64) {
	switch initValue {
	case 1:
		// Fast path: bulk-copy from the precomputed all-ones slice until the
		// first nKeys elements are filled.
		for filled := 0; filled < nKeys; {
			filled += copy(buckets[filled:], uint64OneColumn)
		}
	case 2:
		// Fast path: same as above but copying the all-twos slice.
		for filled := 0; filled < nKeys; {
			filled += copy(buckets[filled:], uint64TwoColumn)
		}
	default:
		// Hoist a single bounds check so the compiler can eliminate the
		// per-element checks inside the loop.
		_ = buckets[nKeys-1]
		for idx := 0; idx < nKeys; idx++ {
			//gcassert:bce
			buckets[idx] = initValue
		}
	}
}

// finalizeHash takes each key's hash value and applies a final transformation
// onto it so that it fits within numBuckets buckets.
// NOTE: nKeys is assumed to be positive.
func finalizeHash[T uint32 | uint64](buckets []T, nKeys int, numBuckets T) {
	// Hoist a single bounds check so the compiler can eliminate the
	// per-element checks in the loops below.
	_ = buckets[nKeys-1]
	if numBuckets&(numBuckets-1) != 0 {
		// numBuckets is not a power of two, so a true modulo is required.
		for idx := 0; idx < nKeys; idx++ {
			buckets[idx] %= numBuckets
		}
		return
	}
	// numBuckets is a power of two, so the modulo can be replaced by a
	// bitwise AND with numBuckets-1 (all low bits set), which has been
	// measured to improve benchmark performance by 20%. In effect this is
	// equivalent to (but faster than):
	//   buckets[idx] %= numBuckets
	mask := numBuckets - 1
	for idx := 0; idx < nKeys; idx++ {
		//gcassert:bce
		buckets[idx] &= mask
	}
}

// TupleHashDistributor is a helper struct that distributes tuples from batches
// according to the corresponding hashes. The "distribution" occurs by
// populating selection vectors which the caller needs to use accordingly.
type TupleHashDistributor struct {
	// InitHashValue is the value used to initialize the hash buckets. Different
	// values can be used to define different hash functions.
	InitHashValue uint64
	// buckets will contain the computed hash value of a group of columns with
	// the same index in the current batch. It is resized lazily in Distribute
	// to match the batch length.
	buckets []uint64
	// selections stores the selection vectors that actually define how to
	// distribute the tuples from the batch. Its length equals the number of
	// outputs; selections[j] collects the tuple indices routed to output j.
	selections [][]int
	// cancelChecker is used during the hashing of the rows to distribute to
	// check for query cancellation.
	cancelChecker colexecutils.CancelChecker
	// datumAlloc is passed to the generated rehash function; presumably it
	// batches datum allocations when hashing datum-backed columns — see
	// hash_utils_tmpl.go to confirm. Its DefaultAllocSize is grown in
	// Distribute to match the largest batch seen so far.
	datumAlloc tree.DatumAlloc
}

// NewTupleHashDistributor returns a new TupleHashDistributor.
func NewTupleHashDistributor(initHashValue uint64, numOutputs int) *TupleHashDistributor {
	d := &TupleHashDistributor{InitHashValue: initHashValue}
	d.selections = make([][]int, numOutputs)
	return d
}

// Init initializes the TupleHashDistributor. Second, third, etc calls are
// noops.
func (d *TupleHashDistributor) Init(ctx context.Context) {
	// NOTE(review): the "noop on repeated calls" claim relies on
	// CancelChecker.Init being idempotent — confirm in colexecutils.
	d.cancelChecker.Init(ctx)
}

// Distribute populates selection vectors to route each of the tuples in b to
// one of the numOutputs outputs according to the computed on hashCols hash
// values.
// NOTE: b is assumed to be non-zero batch.
// NOTE: the distributor *must* be initialized before the first use.
func (d *TupleHashDistributor) Distribute(b coldata.Batch, hashCols []uint32) [][]int {
	n := b.Length()
	// Ensure d.buckets has exactly n elements, reusing the previous
	// allocation whenever it is large enough.
	if cap(d.buckets) < n {
		d.buckets = make([]uint64, n)
	} else {
		d.buckets = d.buckets[:n]
	}
	initHash64(d.buckets, n, d.InitHashValue)

	// Check if we received a batch with more tuples than the current
	// allocation size and increase it if so.
	if n > d.datumAlloc.DefaultAllocSize {
		d.datumAlloc.DefaultAllocSize = n
	}

	// Fold every hash column into each tuple's running hash. rehash is
	// generated by execgen (see hash_utils_tmpl.go).
	for _, i := range hashCols {
		rehash(d.buckets, b.ColVec(int(i)), n, b.Selection(), d.cancelChecker, &d.datumAlloc)
	}

	// Map each hash onto one of len(d.selections) outputs.
	finalizeHash(d.buckets, n, uint64(len(d.selections)))

	// Reset selections. Truncating to zero length keeps the backing arrays
	// for reuse across calls.
	for i := 0; i < len(d.selections); i++ {
		d.selections[i] = d.selections[i][:0]
	}

	// Build a selection vector for each output.
	selection := b.Selection()
	// Early bounds checks.
	buckets := d.buckets
	_ = buckets[n-1]
	if selection != nil {
		// The batch has a selection vector, so route the physical tuple
		// indices it contains rather than 0..n-1.
		for i, selIdx := range selection[:n] {
			//gcassert:bce
			outputIdx := buckets[i]
			d.selections[outputIdx] = append(d.selections[outputIdx], selIdx)
		}
	} else {
		for i := 0; i < n; i++ {
			//gcassert:bce
			outputIdx := buckets[i]
			d.selections[outputIdx] = append(d.selections[outputIdx], i)
		}
	}
	return d.selections
}

// ResetNumOutputs sets up the TupleHashDistributor to distribute the tuples
// to a different number of outputs.
func (d *TupleHashDistributor) ResetNumOutputs(numOutputs int) {
	if numOutputs <= cap(d.selections) {
		// The existing allocation already fits - just adjust the length.
		d.selections = d.selections[:numOutputs]
		return
	}
	// Grow the slice of selection vectors, carrying over all previously
	// allocated selection vectors so that they can be reused.
	grown := make([][]int, numOutputs)
	copy(grown, d.selections)
	d.selections = grown
}
