package aws

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"strconv"
	"sync"
	"time"

	"github.com/Jeffail/gabs/v2"
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
	"github.com/cenkalti/backoff/v4"

	"github.com/warpstreamlabs/bento/internal/impl/aws/config"
	"github.com/warpstreamlabs/bento/internal/retries"
	"github.com/warpstreamlabs/bento/public/bloblang"
	"github.com/warpstreamlabs/bento/public/service"
)

const (
	// DynamoDB Output Fields
	ddboField                   = "namespace" // NOTE(review): appears unused within this file — confirm against the rest of the package before removing
	ddboFieldTable              = "table"
	ddboFieldStringColumns      = "string_columns"
	ddboFieldJSONMapColumns     = "json_map_columns"
	ddboFieldTTL                = "ttl"
	ddboFieldTTLKey             = "ttl_key"
	ddboFieldDelete             = "delete"
	ddboFieldDeleteCondition    = "condition"     // nested under "delete"
	ddboFieldDeletePartitionKey = "partition_key" // nested under "delete"
	ddboFieldDeleteSortKey      = "sort_key"      // nested under "delete"
	ddboFieldBatching           = "batching"
)

// ddboConfig holds the parsed configuration for the aws_dynamodb output.
type ddboConfig struct {
	Table                   string                                 // target DynamoDB table name
	StringColumns           map[string]*service.InterpolatedString // column name -> interpolated string value
	JSONMapColumns          map[string]string                      // column name -> dot path into the message payload ("" or "." selects the root)
	TTL                     string                                 // optional TTL duration string (e.g. "72h"); empty disables TTL
	TTLKey                  string                                 // column the computed TTL epoch value is written to
	DeleteConditionExec     *bloblang.Executor                     // nil unless delete.condition is configured
	PartitionKeyDeleteField string                                 // column used as the partition key for DeleteItem requests
	SortKeyDeleteField      string                                 // optional column used as the sort key for DeleteItem requests

	aconf       aws.Config            // resolved AWS session/credentials config
	backoffCtor func() backoff.BackOff // constructs a fresh retry backoff policy per use
}

// ddboConfigFromParsed extracts a ddboConfig from a parsed component config.
//
// DeleteConditionExec is only compiled when the `delete.condition` field is
// non-empty; otherwise it is left nil, which disables delete-request handling
// in the writer.
func ddboConfigFromParsed(pConf *service.ParsedConfig) (conf ddboConfig, err error) {
	if conf.Table, err = pConf.FieldString(ddboFieldTable); err != nil {
		return
	}
	if conf.StringColumns, err = pConf.FieldInterpolatedStringMap(ddboFieldStringColumns); err != nil {
		return
	}
	if conf.JSONMapColumns, err = pConf.FieldStringMap(ddboFieldJSONMapColumns); err != nil {
		return
	}
	if conf.TTL, err = pConf.FieldString(ddboFieldTTL); err != nil {
		return
	}
	if conf.TTLKey, err = pConf.FieldString(ddboFieldTTLKey); err != nil {
		return
	}
	if conf.aconf, err = GetSession(context.TODO(), pConf); err != nil {
		return
	}
	if conf.backoffCtor, err = retries.CommonRetryBackOffCtorFromParsed(pConf); err != nil {
		return
	}
	deleteConf := pConf.Namespace(ddboFieldDelete)
	// Check the error before inspecting the value: the previous ordering
	// tested `deleteConditionStr != ""` as the if-condition, which silently
	// dropped FieldString errors whenever the returned string was empty.
	var deleteConditionStr string
	if deleteConditionStr, err = deleteConf.FieldString(ddboFieldDeleteCondition); err != nil {
		return
	}
	if deleteConditionStr != "" {
		if conf.DeleteConditionExec, err = deleteConf.FieldBloblang(ddboFieldDeleteCondition); err != nil {
			return
		}
	}
	if conf.PartitionKeyDeleteField, err = deleteConf.FieldString(ddboFieldDeletePartitionKey); err != nil {
		return
	}
	if conf.SortKeyDeleteField, err = deleteConf.FieldString(ddboFieldDeleteSortKey); err != nil {
		return
	}
	return
}

// ddboOutputSpec returns the full config spec for the aws_dynamodb output,
// including documentation, field definitions (table, string/JSON-map columns,
// TTL, delete handling, batching), AWS session fields, retry backoff fields,
// a lint rule enforcing that at least one column source is set and that
// delete.condition and delete.partition_key are configured together, and a
// worked delete-request example.
func ddboOutputSpec() *service.ConfigSpec {
	return service.NewConfigSpec().
		Stable().
		Version("1.0.0").
		Categories("Services", "AWS").
		Summary(`Inserts items into or deletes items from a DynamoDB table.`).
		Description(`
The field `+"`string_columns`"+` is a map of column names to string values, where the values are [function interpolated](/docs/configuration/interpolation#bloblang-queries) per message of a batch. This allows you to populate string columns of an item by extracting fields within the document payload or metadata like follows:

`+"```yml"+`
string_columns:
  id: ${!json("id")}
  title: ${!json("body.title")}
  topic: ${!metadata("kafka_topic")}
  full_content: ${!content()}
`+"```"+`

The field `+"`json_map_columns`"+` is a map of column names to json paths, where the [dot path](/docs/configuration/field_paths) is extracted from each document and converted into a map value. Both an empty path and the path `+"`.`"+` are interpreted as the root of the document. This allows you to populate map columns of an item like follows:

`+"```yml"+`
json_map_columns:
  user: path.to.user
  whole_document: .
`+"```"+`

A column name can be empty:

`+"```yml"+`
json_map_columns:
  "": .
`+"```"+`

In which case the top level document fields will be written at the root of the item, potentially overwriting previously defined column values. If a path is not found within a document the column will not be populated.

### Credentials

By default Bento will use a shared credentials file when connecting to AWS services. It's also possible to set them explicitly at the component level, allowing you to transfer data across accounts. You can find out more [in this document](/docs/guides/cloud/aws).

## Performance

This output benefits from sending multiple messages in flight in parallel for improved performance. You can tune the max number of in flight messages (or message batches) with the field `+"`max_in_flight`"+`.

This output benefits from sending messages as a batch for improved performance. Batches can be formed at both the input and output level. You can find out more [in this doc](/docs/configuration/batching).
`).
		Fields(
			service.NewStringField(ddboFieldTable).
				Description("The table to store messages in."),
			service.NewInterpolatedStringMapField(ddboFieldStringColumns).
				Description("A map of column keys to string values to store.").
				Default(map[string]any{}).
				Example(map[string]any{
					"id":           "${!json(\"id\")}",
					"title":        "${!json(\"body.title\")}",
					"topic":        "${!meta(\"kafka_topic\")}",
					"full_content": "${!content()}",
				}),
			service.NewStringMapField(ddboFieldJSONMapColumns).
				Description("A map of column keys to [field paths](/docs/configuration/field_paths) pointing to value data within messages.").
				Default(map[string]any{}).
				Example(map[string]any{
					"user":           "path.to.user",
					"whole_document": ".",
				}).
				Example(map[string]string{
					"": ".",
				}),
			service.NewStringField(ddboFieldTTL).
				Description("An optional TTL to set for items, calculated from the moment the message is sent.").
				Default("").
				Advanced(),
			service.NewStringField(ddboFieldTTLKey).
				Description("The column key to place the TTL value within.").
				Default("").
				Advanced(),
			// Optional delete-request behaviour: when the condition mapping
			// resolves true the message becomes a DeleteRequest keyed by the
			// configured partition (and optional sort) key columns.
			service.NewObjectField(ddboFieldDelete,
				service.NewBloblangField(ddboFieldDeleteCondition).
					Description("A bloblang mapping that should return a bool, that will determine if the message will be used to create a Delete rather than Put").
					Version("1.10.0").
					Advanced().
					Example(`root = this.isDelete == "true"`).
					Default(""),
				service.NewStringField(ddboFieldDeletePartitionKey).
					Description("The partition key for DeleteItem requests. Required when `"+ddboFieldDelete+"."+ddboFieldDeleteCondition+"` is true. The value of the key will be resolved from either `"+ddboFieldStringColumns+" or "+ddboFieldJSONMapColumns+"`").
					Version("1.10.0").
					Advanced().
					Default(""),
				service.NewStringField(ddboFieldDeleteSortKey).
					Description("The sort key for DeleteItem requests. The value of the key will be resolved from either `"+ddboFieldStringColumns+" or "+ddboFieldJSONMapColumns+"`").
					Version("1.10.0").
					Advanced().
					Default(""),
			).
				Description("Optional config fields that enable creating Delete requests from messages. If the bloblang mapping provided in `"+ddboFieldDelete+"."+ddboFieldDeleteCondition+"` resolves to true, a delete request for the corresponding partition key will be made.").
				Version("1.10.0").
				Optional().
				Advanced(),
			service.NewOutputMaxInFlightField(),
			service.NewBatchPolicyField(ddboFieldBatching),
		).
		Fields(config.SessionFields()...).
		Fields(retries.CommonRetryBackOffFields(3, "1s", "5s", "30s")...).
		LintRule(`root = match {
this.`+ddboFieldStringColumns+`.length() == 0 && this.`+ddboFieldJSONMapColumns+`.length() == 0 => ["at least one of: `+ddboFieldStringColumns+` or `+ddboFieldJSONMapColumns+` must be specified"],
this.`+ddboFieldDelete+`.`+ddboFieldDeleteCondition+` != "" && this.`+ddboFieldDelete+`.`+ddboFieldDeletePartitionKey+` == "" => ["If you provide a `+ddboFieldDelete+`.`+ddboFieldDeleteCondition+` you must also provide a `+ddboFieldDelete+`.`+ddboFieldDeletePartitionKey+`"]
this.`+ddboFieldDelete+`.`+ddboFieldDeletePartitionKey+` != "" && this.`+ddboFieldDelete+`.`+ddboFieldDeleteCondition+` == "" => ["If you provide a `+ddboFieldDelete+`.`+ddboFieldDeletePartitionKey+` you must also provide a `+ddboFieldDelete+`.`+ddboFieldDeleteCondition+`"]
}`).Example(
		"Delete Requests",
		"In the following example, we will be inserting messages to the table `Music` if the bloblang mapping `root = this.isDelete == true` resolves to `false`, if the bloblang mapping resolves to `true` we will make a delete request for items with the `delete.partition_key` and/or `delete.sort_key`, the values for the `partition_key` and `sort_key` will be found using either the `string_columns` or `json_map_columns`",
		`
output:
  aws_dynamodb:
    table: Music
    json_map_columns:
      uuid: uuid
      title: title
      year: year
    delete:
      condition: |
        root = this.isDelete == true
      partition_key: uuid
`,
	)
}

// init registers the aws_dynamodb batch output with the service registry.
// Registration failure is a programmer error (e.g. duplicate name), so it
// panics rather than returning.
func init() {
	ctor := func(conf *service.ParsedConfig, mgr *service.Resources) (service.BatchOutput, service.BatchPolicy, int, error) {
		maxInFlight, err := conf.FieldMaxInFlight()
		if err != nil {
			return nil, service.BatchPolicy{}, 0, err
		}
		batchPolicy, err := conf.FieldBatchPolicy(ddboFieldBatching)
		if err != nil {
			return nil, service.BatchPolicy{}, maxInFlight, err
		}
		wConf, err := ddboConfigFromParsed(conf)
		if err != nil {
			return nil, batchPolicy, maxInFlight, err
		}
		w, err := newDynamoDBWriter(wConf, mgr)
		return w, batchPolicy, maxInFlight, err
	}
	if err := service.RegisterBatchOutput("aws_dynamodb", ddboOutputSpec(), ctor); err != nil {
		panic(err)
	}
}

// dynamoDBAPI is the subset of the DynamoDB client surface used by this
// output, declared locally so the concrete SDK client can be swapped out
// (e.g. for a mock in tests).
type dynamoDBAPI interface {
	PutItem(ctx context.Context, params *dynamodb.PutItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.PutItemOutput, error)
	BatchWriteItem(ctx context.Context, params *dynamodb.BatchWriteItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.BatchWriteItemOutput, error)
	BatchExecuteStatement(ctx context.Context, params *dynamodb.BatchExecuteStatementInput, optFns ...func(*dynamodb.Options)) (*dynamodb.BatchExecuteStatementOutput, error)
	DescribeTable(ctx context.Context, params *dynamodb.DescribeTableInput, optFns ...func(*dynamodb.Options)) (*dynamodb.DescribeTableOutput, error)
	GetItem(ctx context.Context, params *dynamodb.GetItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.GetItemOutput, error)
	DeleteItem(ctx context.Context, params *dynamodb.DeleteItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.DeleteItemOutput, error)
}

// dynamoDBWriter implements service.BatchOutput, writing message batches to a
// DynamoDB table as Put and/or Delete requests.
type dynamoDBWriter struct {
	client dynamoDBAPI // nil until Connect succeeds
	conf   ddboConfig
	log    *service.Logger

	// boffPool pools backoff policies so concurrent WriteBatch calls each
	// get their own retry state.
	boffPool sync.Pool

	table *string       // pre-computed *string of conf.Table for SDK inputs
	ttl   time.Duration // parsed from conf.TTL; zero disables TTL columns
}

// newDynamoDBWriter validates the parsed config and constructs a writer.
// At least one column (string or JSON map) must be configured, and any TTL
// string must parse as a Go duration.
func newDynamoDBWriter(conf ddboConfig, mgr *service.Resources) (*dynamoDBWriter, error) {
	if len(conf.StringColumns)+len(conf.JSONMapColumns) == 0 {
		return nil, errors.New("you must provide at least one column")
	}

	// Normalise "." to "" so both spellings select the document root.
	for name, path := range conf.JSONMapColumns {
		if path == "." {
			conf.JSONMapColumns[name] = ""
		}
	}

	var ttl time.Duration
	if conf.TTL != "" {
		parsed, err := time.ParseDuration(conf.TTL)
		if err != nil {
			return nil, fmt.Errorf("failed to parse TTL: %v", err)
		}
		ttl = parsed
	}

	db := &dynamoDBWriter{
		conf:  conf,
		log:   mgr.Logger(),
		table: aws.String(conf.Table),
		ttl:   ttl,
	}
	db.boffPool = sync.Pool{
		New: func() any { return db.conf.backoffCtor() },
	}
	return db, nil
}

// Connect creates the DynamoDB client (idempotent) and verifies the target
// table exists and is in the ACTIVE state before accepting writes.
func (d *dynamoDBWriter) Connect(ctx context.Context) error {
	if d.client != nil {
		// Already connected.
		return nil
	}

	client := dynamodb.NewFromConfig(d.conf.aconf)
	desc, err := client.DescribeTable(ctx, &dynamodb.DescribeTableInput{TableName: d.table})
	if err != nil {
		return err
	}
	if desc == nil || desc.Table == nil || desc.Table.TableStatus != types.TableStatusActive {
		return fmt.Errorf("dynamodb table '%s' must be active", d.conf.Table)
	}

	d.client = client
	return nil
}

// anyToAttributeValue recursively converts a generic Go value (as produced by
// decoding JSON) into its DynamoDB attribute value representation.
//
// Mappings: map -> M, slice -> L, string -> S, float64/int/int64 -> N,
// bool -> BOOL, nil -> NULL. Any other type falls through and is stringified
// via fmt into an S attribute.
func anyToAttributeValue(root any) types.AttributeValue {
	switch v := root.(type) {
	case map[string]any:
		m := make(map[string]types.AttributeValue, len(v))
		for k, v2 := range v {
			m[k] = anyToAttributeValue(v2)
		}
		return &types.AttributeValueMemberM{
			Value: m,
		}
	case []any:
		l := make([]types.AttributeValue, len(v))
		for i, v2 := range v {
			l[i] = anyToAttributeValue(v2)
		}
		return &types.AttributeValueMemberL{
			Value: l,
		}
	case string:
		return &types.AttributeValueMemberS{
			Value: v,
		}
	case json.Number:
		// NOTE(review): json.Number is stored as a string (S) attribute,
		// unlike float64/int/int64 which are stored as numbers (N). This is
		// preserved for compatibility with existing table data — confirm
		// whether N was intended.
		return &types.AttributeValueMemberS{
			Value: v.String(),
		}
	case float64:
		return &types.AttributeValueMemberN{
			Value: strconv.FormatFloat(v, 'f', -1, 64),
		}
	case int:
		return &types.AttributeValueMemberN{
			Value: strconv.Itoa(v),
		}
	case int64:
		// FormatInt rather than strconv.Itoa(int(v)): the int conversion
		// truncated values outside the native int range on 32-bit platforms.
		return &types.AttributeValueMemberN{
			Value: strconv.FormatInt(v, 10),
		}
	case bool:
		return &types.AttributeValueMemberBOOL{
			Value: v,
		}
	case nil:
		return &types.AttributeValueMemberNULL{
			Value: true,
		}
	}
	return &types.AttributeValueMemberS{
		Value: fmt.Sprintf("%v", root),
	}
}

// jsonToMap resolves a dot path within a structured document and converts the
// selected value into a DynamoDB attribute value. An empty path selects the
// document root. The error return is currently always nil; it is kept so
// callers have a uniform signature.
func jsonToMap(path string, root any) (types.AttributeValue, error) {
	node := gabs.Wrap(root)
	if path == "" {
		return anyToAttributeValue(node.Data()), nil
	}
	return anyToAttributeValue(node.Path(path).Data()), nil
}

// WriteBatch writes a batch of messages to DynamoDB via a single
// BatchWriteItem call containing one Put or Delete request per message.
//
// Failure handling happens in two tiers:
//  1. If the batch call itself errors, every request is retried individually
//     (PutItem/DeleteItem) with backoff, and per-message failures are
//     reported through a service.BatchError. Successful requests are cleared
//     from writeReqs so later retry passes skip them.
//  2. If the batch call succeeds but reports unprocessed items, those items
//     are re-submitted with backoff until they drain, the backoff gives up,
//     or the context is cancelled.
func (d *dynamoDBWriter) WriteBatch(ctx context.Context, b service.MessageBatch) error {
	if d.client == nil {
		return service.ErrNotConnected
	}

	// Backoff policies are pooled so concurrent batches don't share retry
	// state; each is reset before being returned to the pool.
	boff := d.boffPool.Get().(backoff.BackOff)
	defer func() {
		boff.Reset()
		d.boffPool.Put(boff)
	}()

	writeReqs := make([]types.WriteRequest, len(b))

	// Build one write request per message; per-message errors are batched by
	// the walk rather than aborting on the first failure.
	if err := b.WalkWithBatchedErrors(func(i int, p *service.Message) error {
		// With no delete condition configured, every message becomes a Put.
		if d.conf.DeleteConditionExec == nil {
			return d.addPutRequest(i, &b, writeReqs, p)
		}

		msgWithContext := p.WithContext(ctx)

		result, err := msgWithContext.BloblangQuery(d.conf.DeleteConditionExec)
		if err != nil {
			return fmt.Errorf("delete condition exec error: %w", err)
		}

		resultMsgBytes, err := result.AsBytes()
		if err != nil {
			return fmt.Errorf("delete condition result parse error: %w", err)
		}

		// The mapping's output must parse as a boolean.
		isDelete, err := strconv.ParseBool(string(resultMsgBytes))
		if err != nil {
			return fmt.Errorf("delete condition result parse error: %w", err)
		}

		if isDelete {
			return d.addDeleteRequest(i, &b, writeReqs, p)
		}

		return d.addPutRequest(i, &b, writeReqs, p)

	}); err != nil {
		return err
	}

	batchResult, err := d.client.BatchWriteItem(ctx, &dynamodb.BatchWriteItemInput{
		RequestItems: map[string][]types.WriteRequest{
			*d.table: writeReqs,
		},
	})
	if err != nil {
		headlineErr := err

		// None of the messages were successful, attempt to send individually
	individualRequestsLoop:
		for err != nil {
			batchErr := service.NewBatchError(b, headlineErr)
			for i, req := range writeReqs {
				if req.DeleteRequest != nil {
					if _, iErr := d.client.DeleteItem(ctx, &dynamodb.DeleteItemInput{
						TableName: d.table,
						Key:       req.DeleteRequest.Key,
					}); iErr != nil {
						d.log.Errorf("Delete error: %v\n", iErr)
						wait := boff.NextBackOff()
						if wait == backoff.Stop {
							break individualRequestsLoop
						}
						select {
						case <-time.After(wait):
						case <-ctx.Done():
							break individualRequestsLoop
						}
						// NOTE(review): when the backoff stops or the context
						// is cancelled above, this failure is never recorded
						// in batchErr and the headline error is returned
						// instead — confirm that is the intended behaviour.
						batchErr.Failed(i, iErr)
					} else {
						// Success: clear the request so later passes skip it.
						writeReqs[i].DeleteRequest = nil
					}
					continue
				}
				// Guard against holes left by previously-succeeded puts.
				if req.PutRequest == nil {
					continue
				}
				if _, iErr := d.client.PutItem(ctx, &dynamodb.PutItemInput{
					TableName: d.table,
					Item:      req.PutRequest.Item,
				}); iErr != nil {
					d.log.Errorf("Put error: %v\n", iErr)
					wait := boff.NextBackOff()
					if wait == backoff.Stop {
						break individualRequestsLoop
					}
					select {
					case <-time.After(wait):
					case <-ctx.Done():
						break individualRequestsLoop
					}
					batchErr.Failed(i, iErr)
				} else {
					writeReqs[i].PutRequest = nil
				}
			}
			// Loop again only while at least one request is still failing.
			if batchErr.IndexedErrors() == 0 {
				err = nil
			} else {
				err = batchErr
			}
		}
		return err
	}

	// Tier 2: the batch call succeeded but DynamoDB may throttle some items,
	// returning them as unprocessed; retry those with backoff.
	unproc := batchResult.UnprocessedItems[*d.table]
unprocessedLoop:
	for len(unproc) > 0 {
		wait := boff.NextBackOff()
		if wait == backoff.Stop {
			break unprocessedLoop
		}

		select {
		case <-time.After(wait):
		case <-ctx.Done():
			break unprocessedLoop
		}
		if batchResult, err = d.client.BatchWriteItem(ctx, &dynamodb.BatchWriteItemInput{
			RequestItems: map[string][]types.WriteRequest{
				*d.table: unproc,
			},
		}); err != nil {
			d.log.Errorf("Write multi error: %v\n", err)
		} else if unproc = batchResult.UnprocessedItems[*d.table]; len(unproc) > 0 {
			err = fmt.Errorf("failed to set %v items", len(unproc))
		} else {
			unproc = nil
		}
	}

	// Items still unprocessed here mean the backoff gave up (or the context
	// was cancelled) before DynamoDB accepted everything.
	if len(unproc) > 0 {
		if err == nil {
			err = errors.New("ran out of request retries")
		}
	}
	return err
}

// Close is a no-op: the writer holds no resources requiring explicit
// shutdown.
func (d *dynamoDBWriter) Close(context.Context) error {
	return nil
}

//------------------------------------------------------------------------------

// addDeleteRequest populates writeReqs[i] with a DeleteRequest whose key is
// built from the configured partition key field and, when set, the sort key
// field. The previous implementation duplicated the key-resolution logic for
// both fields; it is now shared via resolveDeleteKey.
func (d *dynamoDBWriter) addDeleteRequest(i int, b *service.MessageBatch, writeReqs []types.WriteRequest, p *service.Message) error {
	key := map[string]types.AttributeValue{}

	pkAttr, err := d.resolveDeleteKey(i, b, p, d.conf.PartitionKeyDeleteField, "partition key")
	if err != nil {
		return err
	}
	key[d.conf.PartitionKeyDeleteField] = pkAttr

	if d.conf.SortKeyDeleteField != "" {
		skAttr, err := d.resolveDeleteKey(i, b, p, d.conf.SortKeyDeleteField, "sort key")
		if err != nil {
			return err
		}
		key[d.conf.SortKeyDeleteField] = skAttr
	}

	writeReqs[i] = types.WriteRequest{
		DeleteRequest: &types.DeleteRequest{
			Key: key,
		},
	}
	return nil
}

// resolveDeleteKey resolves the attribute value of a delete-key column. If
// the field is declared in string_columns its interpolation is used (as an S
// attribute); otherwise the value is extracted from the structured payload
// via the matching json_map_columns path. label names the key in
// interpolation error messages ("partition key" / "sort key").
func (d *dynamoDBWriter) resolveDeleteKey(i int, b *service.MessageBatch, p *service.Message, field, label string) (types.AttributeValue, error) {
	if expr, ok := d.conf.StringColumns[field]; ok {
		v, err := b.TryInterpolatedString(i, expr)
		if err != nil {
			return nil, fmt.Errorf("%s error: %w", label, err)
		}
		return &types.AttributeValueMemberS{Value: v}, nil
	}
	jRoot, err := p.AsStructured()
	if err != nil {
		return nil, err
	}
	return jsonToMap(d.conf.JSONMapColumns[field], jRoot)
}

// addPutRequest populates writeReqs[i] with a PutRequest built from the
// message: an optional TTL column, interpolated string columns, and
// JSON-map columns. A JSON-map column with an empty name whose resolved
// value is a map is flattened into the item root, potentially overwriting
// previously set columns.
func (d *dynamoDBWriter) addPutRequest(i int, b *service.MessageBatch, writeReqs []types.WriteRequest, p *service.Message) error {
	items := map[string]types.AttributeValue{}

	// TTL column: expiry is stored as an epoch-seconds number.
	if d.ttl != 0 && d.conf.TTLKey != "" {
		expiry := time.Now().Add(d.ttl).Unix()
		items[d.conf.TTLKey] = &types.AttributeValueMemberN{
			Value: strconv.FormatInt(expiry, 10),
		}
	}

	for col, expr := range d.conf.StringColumns {
		val, err := b.TryInterpolatedString(i, expr)
		if err != nil {
			return fmt.Errorf("string column %v interpolation error: %w", col, err)
		}
		items[col] = &types.AttributeValueMemberS{
			Value: val,
		}
	}

	if len(d.conf.JSONMapColumns) > 0 {
		jRoot, err := p.AsStructured()
		if err != nil {
			d.log.Errorf("Failed to extract JSON maps from document: %v", err)
			return err
		}
		for col, path := range d.conf.JSONMapColumns {
			attr, err := jsonToMap(path, jRoot)
			if err != nil {
				d.log.Warnf("Unable to extract JSON map path '%v' from document: %v", path, err)
				return err
			}
			// An empty column name flattens a map value into the item root.
			if mv, ok := attr.(*types.AttributeValueMemberM); ok && col == "" {
				for ak, av := range mv.Value {
					items[ak] = av
				}
				continue
			}
			items[col] = attr
		}
	}

	writeReqs[i] = types.WriteRequest{
		PutRequest: &types.PutRequest{
			Item: items,
		},
	}
	return nil
}
