// Copyright 2024 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package iscp

import (
	"context"
	"encoding/hex"
	"fmt"
	"slices"

	// "encoding/hex"
	"math"
	// "slices"
	"strconv"
	"strings"
	"time"

	"go.uber.org/zap"

	"github.com/matrixorigin/matrixone/pkg/common/moerr"
	"github.com/matrixorigin/matrixone/pkg/common/mpool"
	"github.com/matrixorigin/matrixone/pkg/container/batch"
	"github.com/matrixorigin/matrixone/pkg/container/bytejson"

	// "github.com/matrixorigin/matrixone/pkg/container/bytejson"
	"github.com/matrixorigin/matrixone/pkg/container/types"
	"github.com/matrixorigin/matrixone/pkg/container/vector"
	"github.com/matrixorigin/matrixone/pkg/logutil"
	"github.com/matrixorigin/matrixone/pkg/txn/client"
	"github.com/matrixorigin/matrixone/pkg/vm/engine"
)

// extractRowFromEveryVector extracts row rowIndex from every vector of the
// batch into row (one entry per column).
// bat columns layout:
// 1. data: user defined cols | cpk (if needed) | commit-ts
// 2. tombstone: pk/cpk | commit-ts
// return user defined cols for data or only one cpk column for tombstone
func extractRowFromEveryVector(
	ctx context.Context,
	dataSet *batch.Batch,
	rowIndex int,
	row []any,
) error {
	for colIdx := range row {
		vec := dataSet.Vecs[colIdx]
		// A const-null vector is nil for every row.
		if vec.IsConstNull() {
			row[colIdx] = nil
			continue
		}
		// A const vector stores its single value at index 0.
		idx := rowIndex
		if vec.IsConst() {
			idx = 0
		}
		if err := extractRowFromVector(ctx, vec, colIdx, row, idx); err != nil {
			return err
		}
	}
	return nil
}

// extractRowFromVector gets the rowIndex row from the i vector and stores its
// Go-native representation into row[i].
//
// Fixed-width numeric types come back as Go scalars; date/time/decimal/uuid
// values are pre-formatted into strings (honoring the vector's scale where
// relevant); varlen byte types are deep-copied so the result does not alias
// the vector's memory. Unsupported type oids are logged and returned as an
// internal error.
func extractRowFromVector(ctx context.Context, vec *vector.Vector, i int, row []any, rowIndex int) error {
	// NULL — either a const-null vector or a null bit set for this row.
	if vec.IsConstNull() || vec.GetNulls().Contains(uint64(rowIndex)) {
		row[i] = nil
		return nil
	}

	switch vec.GetType().Oid { //get col
	case types.T_json:
		// Copy first so the decoded ByteJson does not alias vector memory.
		row[i] = types.DecodeJson(copyBytes(vec.GetBytesAt(rowIndex)))
	case types.T_bool:
		row[i] = vector.GetFixedAtWithTypeCheck[bool](vec, rowIndex)
	case types.T_bit:
		row[i] = vector.GetFixedAtWithTypeCheck[uint64](vec, rowIndex)
	case types.T_int8:
		row[i] = vector.GetFixedAtWithTypeCheck[int8](vec, rowIndex)
	case types.T_uint8:
		row[i] = vector.GetFixedAtWithTypeCheck[uint8](vec, rowIndex)
	case types.T_int16:
		row[i] = vector.GetFixedAtWithTypeCheck[int16](vec, rowIndex)
	case types.T_uint16:
		row[i] = vector.GetFixedAtWithTypeCheck[uint16](vec, rowIndex)
	case types.T_int32:
		row[i] = vector.GetFixedAtWithTypeCheck[int32](vec, rowIndex)
	case types.T_uint32:
		row[i] = vector.GetFixedAtWithTypeCheck[uint32](vec, rowIndex)
	case types.T_int64:
		row[i] = vector.GetFixedAtWithTypeCheck[int64](vec, rowIndex)
	case types.T_uint64:
		row[i] = vector.GetFixedAtWithTypeCheck[uint64](vec, rowIndex)
	case types.T_float32:
		row[i] = vector.GetFixedAtWithTypeCheck[float32](vec, rowIndex)
	case types.T_float64:
		row[i] = vector.GetFixedAtWithTypeCheck[float64](vec, rowIndex)
	case types.T_char, types.T_varchar, types.T_blob, types.T_text, types.T_binary, types.T_varbinary, types.T_datalink:
		// Deep copy: GetBytesAt returns a view into the vector's buffer.
		row[i] = copyBytes(vec.GetBytesAt(rowIndex))
	case types.T_array_float32:
		// NOTE: Don't merge it with T_varchar. You will get raw binary in the SQL output
		//+------------------------------+
		//| abs(cast([1,2,3] as vecf32)) |
		//+------------------------------+
		//|   �?   @  @@                  |
		//+------------------------------+
		row[i] = vector.GetArrayAt[float32](vec, rowIndex)
	case types.T_array_float64:
		row[i] = vector.GetArrayAt[float64](vec, rowIndex)
	case types.T_date:
		row[i] = vector.GetFixedAtWithTypeCheck[types.Date](vec, rowIndex)
	case types.T_datetime:
		// Format with the column's fractional-second scale.
		scale := vec.GetType().Scale
		row[i] = vector.GetFixedAtWithTypeCheck[types.Datetime](vec, rowIndex).String2(scale)
	case types.T_time:
		scale := vec.GetType().Scale
		row[i] = vector.GetFixedAtWithTypeCheck[types.Time](vec, rowIndex).String2(scale)
	case types.T_timestamp:
		scale := vec.GetType().Scale
		//TODO:get the right timezone
		//timeZone := ses.GetTimeZone()
		timeZone := time.UTC
		row[i] = vector.GetFixedAtWithTypeCheck[types.Timestamp](vec, rowIndex).String2(timeZone, scale)
	case types.T_decimal64:
		scale := vec.GetType().Scale
		row[i] = vector.GetFixedAtWithTypeCheck[types.Decimal64](vec, rowIndex).Format(scale)
	case types.T_decimal128:
		scale := vec.GetType().Scale
		row[i] = vector.GetFixedAtWithTypeCheck[types.Decimal128](vec, rowIndex).Format(scale)
	case types.T_uuid:
		row[i] = vector.GetFixedAtWithTypeCheck[types.Uuid](vec, rowIndex).String()
	case types.T_Rowid:
		row[i] = vector.GetFixedAtWithTypeCheck[types.Rowid](vec, rowIndex)
	case types.T_Blockid:
		row[i] = vector.GetFixedAtWithTypeCheck[types.Blockid](vec, rowIndex)
	case types.T_TS:
		row[i] = vector.GetFixedAtWithTypeCheck[types.TS](vec, rowIndex)
	case types.T_enum:
		row[i] = vector.GetFixedAtWithTypeCheck[types.Enum](vec, rowIndex)
	default:
		logutil.Error(
			"Failed to extract row from vector, unsupported type",
			zap.Int("typeID", int(vec.GetType().Oid)))
		return moerr.NewInternalErrorf(ctx, "extractRowFromVector : unsupported type %d", vec.GetType().Oid)
	}
	return nil
}

// copyBytes returns a detached copy of src so callers can hold onto the
// result without aliasing the source buffer. An empty or nil input yields
// a non-nil empty slice, never nil.
func copyBytes(src []byte) []byte {
	if len(src) == 0 {
		return []byte{}
	}
	return append(make([]byte, 0, len(src)), src...)
}

// convertColIntoSql appends the SQL-literal form of a single column value
// (as produced by extractRowFromEveryVector) to sqlBuff and returns the
// extended buffer.
//
// nil is rendered as CAST(NULL as <type>) so the literal keeps the column
// type; numeric values are written bare; string-like values are quoted with
// backslash/quote escaping; binary values become hex literals (x'..');
// array (vector) values are emitted as CAST('<array>' as <type>).
func convertColIntoSql(
	ctx context.Context,
	data any,
	typ *types.Type,
	sqlBuff []byte) ([]byte, error) {
	if data == nil {
		typstr := typ.DescString()
		sqlBuff = appendString(sqlBuff, fmt.Sprintf("CAST(NULL as %s)", typstr))
		return sqlBuff, nil
	}
	var temp string
	switch typ.Oid { //get col
	case types.T_json:
		sqlBuff = appendByte(sqlBuff, '\'')
		temp = data.(bytejson.ByteJson).String()
		sqlBuff = appendString(sqlBuff, temp)
		sqlBuff = appendByte(sqlBuff, '\'')
	case types.T_bool:
		b := data.(bool)
		if b {
			temp = "true"
		} else {
			temp = "false"
		}
		sqlBuff = appendString(sqlBuff, temp)
	case types.T_bit:
		value := data.(uint64)
		bitLength := typ.Width
		byteLength := (bitLength + 7) / 8
		// EncodeUint64 yields little-endian bytes; reverse the significant
		// prefix so the literal is most-significant byte first.
		b := types.EncodeUint64(&value)[:byteLength]
		slices.Reverse(b)
		sqlBuff = appendByte(sqlBuff, '\'')
		sqlBuff = appendBytes(sqlBuff, b)
		sqlBuff = appendByte(sqlBuff, '\'')
	case types.T_int8:
		value := data.(int8)
		sqlBuff = appendInt64(sqlBuff, int64(value))
	case types.T_uint8:
		value := data.(uint8)
		sqlBuff = appendUint64(sqlBuff, uint64(value))
	case types.T_int16:
		value := data.(int16)
		sqlBuff = appendInt64(sqlBuff, int64(value))
	case types.T_uint16:
		value := data.(uint16)
		sqlBuff = appendUint64(sqlBuff, uint64(value))
	case types.T_int32:
		value := data.(int32)
		sqlBuff = appendInt64(sqlBuff, int64(value))
	case types.T_uint32:
		value := data.(uint32)
		sqlBuff = appendUint64(sqlBuff, uint64(value))
	case types.T_int64:
		value := data.(int64)
		sqlBuff = appendInt64(sqlBuff, value)
	case types.T_uint64:
		value := data.(uint64)
		sqlBuff = appendUint64(sqlBuff, value)
	case types.T_float32:
		value := data.(float32)
		sqlBuff = appendFloat64(sqlBuff, float64(value), 32)
	case types.T_float64:
		value := data.(float64)
		sqlBuff = appendFloat64(sqlBuff, value, 64)
	case types.T_binary, types.T_varbinary, types.T_blob:
		sqlBuff = appendHex(sqlBuff, data.([]byte))
	case types.T_char,
		types.T_varchar,
		types.T_text,
		types.T_datalink:
		// Escape backslashes first, then quotes, so a backslash introduced
		// by quote-escaping is not doubled.
		value := string(data.([]byte))
		value = strings.ReplaceAll(value, "\\", "\\\\")
		value = strings.ReplaceAll(value, "'", "\\'")
		sqlBuff = appendByte(sqlBuff, '\'')
		sqlBuff = appendBytes(sqlBuff, []byte(value))
		sqlBuff = appendByte(sqlBuff, '\'')
	case types.T_array_float32:
		// NOTE: Don't merge it with T_varchar. You will get raw binary in the SQL output
		//+------------------------------+
		//| abs(cast([1,2,3] as vecf32)) |
		//+------------------------------+
		//|   �?   @  @@                  |
		//+------------------------------+
		value := data.([]float32)
		typstr := typ.DescString()
		sqlBuff = appendString(sqlBuff, fmt.Sprintf("CAST('%s' as %s)", types.ArrayToString(value), typstr))
	case types.T_array_float64:
		value := data.([]float64)
		typstr := typ.DescString()
		sqlBuff = appendString(sqlBuff, fmt.Sprintf("CAST('%s' as %s)", types.ArrayToString(value), typstr))
	case types.T_date:
		value := data.(types.Date)
		sqlBuff = appendByte(sqlBuff, '\'')
		sqlBuff = appendString(sqlBuff, value.String())
		sqlBuff = appendByte(sqlBuff, '\'')
	case types.T_datetime:
		// Datetime/time/timestamp/decimal/uuid arrive pre-formatted as
		// strings from extractRowFromVector; just quote them.
		value := data.(string)
		sqlBuff = appendByte(sqlBuff, '\'')
		sqlBuff = appendString(sqlBuff, value)
		sqlBuff = appendByte(sqlBuff, '\'')
	case types.T_time:
		value := data.(string)
		sqlBuff = appendByte(sqlBuff, '\'')
		sqlBuff = appendString(sqlBuff, value)
		sqlBuff = appendByte(sqlBuff, '\'')
	case types.T_timestamp:
		value := data.(string)
		sqlBuff = appendByte(sqlBuff, '\'')
		sqlBuff = appendString(sqlBuff, value)
		sqlBuff = appendByte(sqlBuff, '\'')
	case types.T_decimal64:
		value := data.(string)
		sqlBuff = appendByte(sqlBuff, '\'')
		sqlBuff = appendString(sqlBuff, value)
		sqlBuff = appendByte(sqlBuff, '\'')
	case types.T_decimal128:
		value := data.(string)
		sqlBuff = appendByte(sqlBuff, '\'')
		sqlBuff = appendString(sqlBuff, value)
		sqlBuff = appendByte(sqlBuff, '\'')
	case types.T_uuid:
		value := data.(string)
		sqlBuff = appendByte(sqlBuff, '\'')
		sqlBuff = appendString(sqlBuff, value)
		sqlBuff = appendByte(sqlBuff, '\'')
	case types.T_Rowid:
		value := data.(types.Rowid)
		sqlBuff = appendByte(sqlBuff, '\'')
		sqlBuff = appendString(sqlBuff, value.String())
		sqlBuff = appendByte(sqlBuff, '\'')
	case types.T_Blockid:
		value := data.(types.Blockid)
		sqlBuff = appendByte(sqlBuff, '\'')
		sqlBuff = appendString(sqlBuff, value.String())
		sqlBuff = appendByte(sqlBuff, '\'')
	case types.T_TS:
		value := data.(types.TS)
		sqlBuff = appendByte(sqlBuff, '\'')
		sqlBuff = appendString(sqlBuff, value.ToString())
		sqlBuff = appendByte(sqlBuff, '\'')
	case types.T_enum:
		value := data.(types.Enum)
		sqlBuff = appendByte(sqlBuff, '\'')
		sqlBuff = appendString(sqlBuff, value.String())
		sqlBuff = appendByte(sqlBuff, '\'')
	default:
		// Fix copy-paste from extractRowFromVector: report this function's
		// own name so the log/error points at the right place.
		logutil.Error(
			"Failed to convert column into SQL, unsupported type",
			zap.Int("typeID", int(typ.Oid)))
		return nil, moerr.NewInternalErrorf(ctx, "convertColIntoSql : unsupported type %d", typ.Oid)
	}

	return sqlBuff, nil
}

func appendHex(dst []byte, src []byte) []byte {
	dst = append(dst, "x'"...)
	dst = hex.AppendEncode(dst, src)
	dst = append(dst, '\'')
	return dst
}

// appendByte appends the single byte d to buf and returns the extended buffer.
func appendByte(buf []byte, d byte) []byte {
	buf = append(buf, d)
	return buf
}

// appendBytes appends all of data to buf and returns the extended buffer.
func appendBytes(buf []byte, data []byte) []byte {
	buf = append(buf, data...)
	return buf
}

// appendString appends the bytes of s to buf and returns the extended buffer.
func appendString(buf []byte, s string) []byte {
	return append(buf, s...)
}

// appendInt64 appends the base-10 text of value to buf and returns the
// extended buffer.
func appendInt64(buf []byte, value int64) []byte {
	buf = strconv.AppendInt(buf, value, 10)
	return buf
}

// appendUint64 appends the base-10 text of value to buf and returns the
// extended buffer.
func appendUint64(buf []byte, value uint64) []byte {
	buf = strconv.AppendUint(buf, value, 10)
	return buf
}

func appendFloat64(buf []byte, value float64, bitSize int) []byte {
	if !math.IsInf(value, 0) {
		buf = strconv.AppendFloat(buf, value, 'f', -1, bitSize)
	} else {
		if math.IsInf(value, 1) {
			buf = append(buf, []byte("+Infinity")...)
		} else {
			buf = append(buf, []byte("-Infinity")...)
		}
	}
	return buf
}

// CollectChanges returns a ChangesHandle over rel's changes between fromTs
// and toTs. It is declared as a package-level variable so tests can swap in
// a stub implementation.
var CollectChanges = func(ctx context.Context, rel engine.Relation, fromTs, toTs types.TS, mp *mpool.MPool) (engine.ChangesHandle, error) {
	return rel.CollectChanges(ctx, fromTs, toTs, false, mp)
}

// batchRowCount reports the number of rows in bat. A nil or column-less
// batch counts as zero rows; otherwise the length of the first vector is
// used, since every vector in a batch has the same length.
func batchRowCount(bat *batch.Batch) int {
	if bat != nil && len(bat.Vecs) > 0 {
		return bat.Vecs[0].Length()
	}
	return 0
}

// getTxn creates a new transaction operator tagged with info and binds it to
// cnEngine. The snapshot timestamp is the engine's latest applied logtail
// time, so the txn observes all data already replicated to this CN.
func getTxn(
	ctx context.Context,
	cnEngine engine.Engine,
	cnTxnClient client.TxnClient,
	info string,
) (client.TxnOperator, error) {
	nowTs := cnEngine.LatestLogtailAppliedTime()
	op, err := cnTxnClient.New(
		ctx,
		nowTs,
		client.WithTxnCreateBy(0, "", info, 0),
	)
	if err != nil {
		return nil, err
	}
	if err = cnEngine.New(ctx, op); err != nil {
		return nil, err
	}
	return op, nil
}

// CheckLeaseWithRetry reports (with retries) whether this CN still holds the
// ISCP task lease. Any failure — an error or ok=false — is logged before
// returning. Declared as a package-level variable so tests can stub it.
var CheckLeaseWithRetry = func(
	ctx context.Context,
	cnUUID string,
	txnEngine engine.Engine,
	cnTxnClient client.TxnClient,
) (ok bool, err error) {
	defer func() {
		if err != nil || !ok {
			// Bug fix: the original called logutil.Errorf with a printf
			// format string but zap field values as arguments, which printed
			// zap.Field structs instead of the values. Use the structured
			// logger so the fields render correctly.
			logutil.Error(
				"ISCP-Task check lease failed",
				zap.Error(err),
				zap.Bool("ok", ok),
				zap.String("cnUUID", cnUUID),
			)
		}
	}()
	err = retry(
		ctx,
		func() error {
			ok, err = checkLease(ctx, cnUUID, txnEngine, cnTxnClient)
			return err
		},
		DefaultRetryTimes,
		DefaultRetryInterval,
		DefaultRetryDuration,
	)
	return
}

// checkLease reports whether this CN (cnUUID) is the registered runner of the
// ISCP daemon task. It opens a short-lived transaction, queries
// mo_task.sys_daemon_task for the task runner, and compares it with cnUUID.
//
// ok=false with a nil error means another CN holds the lease; an error is
// returned when the query fails, yields an unexpected row count, or the
// runner column is empty.
func checkLease(
	ctx context.Context,
	cnUUID string,
	txnEngine engine.Engine,
	cnTxnClient client.TxnClient,
) (ok bool, err error) {
	// Bound the whole check so a stuck query cannot block lease validation.
	ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Minute*5)
	defer cancel()
	txn, err := getTxn(ctxWithTimeout, txnEngine, cnTxnClient, "iscp check lease")
	if err != nil {
		return
	}
	// Read-only lookup; the commit result is intentionally ignored.
	defer txn.Commit(ctxWithTimeout)

	sql := `select task_runner from mo_task.sys_daemon_task where task_type = "ISCP" and task_runner is not null`
	result, err := ExecWithResult(ctxWithTimeout, sql, cnUUID, txn)
	if err != nil {
		return
	}
	defer result.Close()
	result.ReadRows(func(rows int, cols []*vector.Vector) bool {
		// Exactly one ISCP daemon task row is expected.
		if rows != 1 {
			err = moerr.NewInternalErrorNoCtx(fmt.Sprintf("unexpected rows count: %d", rows))
			return false
		}
		runner := cols[0].GetStringAt(0)
		if runner == "" {
			err = moerr.NewInternalErrorNoCtx("task runner is null")
			return false
		}
		if runner == cnUUID {
			ok = true
		} else {
			// Lease held by a different CN: not an error, but worth logging.
			logutil.Errorf(
				"ISCP-Task check lease failed, runner: %s, expected: %s",
				runner,
				cnUUID,
			)
		}
		// Returning false stops reading after the first batch.
		return false
	})
	return
}
