/*
Copyright (c) 2024 Seldon Technologies Ltd.

Use of this software is governed by
(1) the license included in the LICENSE file or
(2) if the license included in the LICENSE file is the Business Source License 1.1,
the Change License after the Change Date as each is defined in accordance with the LICENSE file.
*/

package gateway

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"math"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
	"github.com/confluentinc/confluent-kafka-go/v2/schemaregistry"
	"github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde"
	"github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/protobuf"
	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
	"github.com/signalfx/splunk-otel-go/instrumentation/github.com/confluentinc/confluent-kafka-go/v2/kafka/splunkkafka"
	log "github.com/sirupsen/logrus"
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/metadata"
	"google.golang.org/protobuf/proto"

	"github.com/seldonio/seldon-core/apis/go/v2/mlops/inference_schema"
	v2 "github.com/seldonio/seldon-core/apis/go/v2/mlops/v2_dataplane"

	kafka2 "github.com/seldonio/seldon-core/scheduler/v2/pkg/kafka"
	pipeline "github.com/seldonio/seldon-core/scheduler/v2/pkg/kafka/pipeline"
	seldontracer "github.com/seldonio/seldon-core/scheduler/v2/pkg/tracing"
	"github.com/seldonio/seldon-core/scheduler/v2/pkg/util"
)

// InferWorker consumes inference jobs taken from Kafka and forwards them to a
// v2 inference server over gRPC or REST, producing the responses (or errors)
// back onto Kafka topics.
type InferWorker struct {
	logger               log.FieldLogger
	grpcClient           v2.GRPCInferenceServiceClient
	httpClient           *http.Client
	consumer             *InferKafkaHandler // source of consumed messages and sink for produced responses
	tracer               trace.Tracer
	callOptions          []grpc.CallOption // per-call gRPC options (max send/recv message sizes)
	topicNamer           *kafka2.TopicNamer
	schemaRegistryClient schemaregistry.Client // optional; when non-nil, responses are serialised with a schema id
}

// InferWork is a single inference job built from a consumed Kafka message.
type InferWork struct {
	modelName string            // target model for the inference call
	headers   map[string]string // message headers keyed by name (see processRequest for recognised keys)
	msg       *kafka.Message    // the original consumed message (payload, key, headers, partition)
	span      trace.Span        // optional span recording how long the job waited for a worker; ended in Start
}

// V2Error is the JSON error body shape used by v2 inference protocol responses.
type V2Error struct {
	Error string `json:"error"`
}

// NewInferWorker builds a worker bound to the given Kafka consumer, creating
// the HTTP and gRPC clients used to reach the inference server. It returns an
// error if the gRPC client cannot be created.
func NewInferWorker(
	consumer *InferKafkaHandler,
	logger log.FieldLogger,
	traceProvider *seldontracer.TracerProvider,
	topicNamer *kafka2.TopicNamer,
	schemaRegistryClient schemaregistry.Client,
) (*InferWorker, error) {
	callOpts := []grpc.CallOption{
		grpc.MaxCallSendMsgSize(math.MaxInt32),
		grpc.MaxCallRecvMsgSize(math.MaxInt32),
	}

	worker := &InferWorker{
		logger:               logger.WithField("source", "KafkaInferWorker"),
		httpClient:           util.GetHttpClientFromTLSOptions(consumer.tlsClientOptions),
		consumer:             consumer,
		tracer:               traceProvider.GetTraceProvider().Tracer("Worker"),
		callOptions:          callOpts,
		topicNamer:           topicNamer,
		schemaRegistryClient: schemaRegistryClient,
	}

	// Create gRPC client for the configured inference server endpoint.
	serverConfig := consumer.consumerConfig.InferenceServerConfig
	grpcClient, err := worker.getGrpcClient(serverConfig.Host, serverConfig.GrpcPort)
	if err != nil {
		return nil, err
	}
	worker.grpcClient = grpcClient

	return worker, nil
}

// getRestUrl builds the v2 REST infer endpoint URL for a model, using https
// when tls is set.
func getRestUrl(tls bool, host string, port int, modelName string) *url.URL {
	u := url.URL{
		Scheme: "http",
		Host:   net.JoinHostPort(host, strconv.Itoa(port)),
		Path:   fmt.Sprintf("/v2/models/%s/infer", modelName),
	}
	if tls {
		u.Scheme = "https"
	}
	return &u
}

// getGrpcClient creates a v2 dataplane gRPC client for the inference server
// at host:port. Unary calls retry with exponential backoff (to ride out envoy
// connection churn) and the transport uses TLS when the consumer is
// configured for it, insecure credentials otherwise.
func (iw *InferWorker) getGrpcClient(host string, port int) (v2.GRPCInferenceServiceClient, error) {
	logger := iw.logger.WithField("func", "getGrpcClient")

	var transportCreds credentials.TransportCredentials
	if iw.consumer.tlsClientOptions.TLS {
		logger.Info("Creating TLS credentials")
		transportCreds = iw.consumer.tlsClientOptions.Cert.CreateClientTransportCredentials()
	} else {
		logger.Info("Creating insecure credentials")
		transportCreds = insecure.NewCredentials()
	}

	retryOpts := []grpc_retry.CallOption{
		grpc_retry.WithBackoff(grpc_retry.BackoffExponential(util.GRPCRetryBackoff)),
		grpc_retry.WithMax(util.GRPCRetryMaxCount), // retry envoy connection
	}

	dialOpts := []grpc.DialOption{
		grpc.WithKeepaliveParams(util.GetClientKeepAliveParameters()),
		grpc.WithTransportCredentials(transportCreds),
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(util.GRPCMaxMsgSizeBytes),
			grpc.MaxCallSendMsgSize(util.GRPCMaxMsgSizeBytes),
		),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
		grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(retryOpts...)),
	}

	conn, err := grpc.NewClient(fmt.Sprintf("%s:%d", host, port), dialOpts...)
	if err != nil {
		return nil, err
	}
	return v2.NewGRPCInferenceServiceClient(conn), nil
}

// getProtoInferRequest decodes the job payload as a proto ModelInferRequest.
// If that fails, it tries decoding it as a ModelInferResponse (the output of
// a previous pipeline step) and converts that into a request; the second
// unmarshal error is returned if neither decoding succeeds.
func getProtoInferRequest(job *InferWork) (*v2.ModelInferRequest, error) {
	req := v2.ModelInferRequest{}
	if err := proto.Unmarshal(job.msg.Value, &req); err == nil {
		return &req, nil
	}

	resp := v2.ModelInferResponse{}
	if err := proto.Unmarshal(job.msg.Value, &resp); err != nil {
		return nil, err
	}
	return chainProtoResponseToRequest(&resp), nil
}

// Start runs the worker loop: it pulls jobs from jobChan and processes each
// one with the given per-request timeout (in milliseconds) until cancelChan
// is signalled. Processing failures are logged, not returned.
func (iw *InferWorker) Start(jobChan <-chan *InferWork, cancelChan <-chan struct{}, inferTimeout int) {
	requestTimeout := time.Duration(inferTimeout) * time.Millisecond
	for {
		select {
		case <-cancelChan:
			return

		case job := <-jobChan:
			if job.span != nil {
				// records how long we had to wait for a worker to pick up the job
				job.span.End()
			}
			ctx := createBaseContextFromKafkaMsg(job.msg)
			if err := iw.processRequest(ctx, job, requestTimeout); err != nil {
				iw.logger.WithError(err).Errorf("Failed to process request for model %s", job.modelName)
			}
		}
	}
}

// processRequest dispatches a job to the inference server, choosing REST or
// gRPC based on the job's type header.
//
// When no type header is present (the general case, to allow easy use) the
// payload is first tried as a protobuf request; if that fails it is assumed
// to be JSON and sent via REST. The REST/gRPC helpers route call failures to
// the model error topic themselves.
func (iw *InferWorker) processRequest(ctx context.Context, job *InferWork, timeout time.Duration) error {
	ctxWithTimeout, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	typeValue, hasTypeHeader := job.headers[HeaderKeyType]
	if !hasTypeHeader {
		// No type header: prefer proto, fall back to REST (with possible
		// chained response->request conversion) if the payload is not proto.
		protoRequest, err := getProtoInferRequest(job)
		if err != nil {
			return iw.restRequest(ctxWithTimeout, job, true)
		}
		return iw.grpcRequest(ctxWithTimeout, job, protoRequest)
	}

	switch typeValue {
	case HeaderValueJsonReq:
		return iw.restRequest(ctxWithTimeout, job, false)
	case HeaderValueJsonRes:
		return iw.restRequest(ctxWithTimeout, job, true)
	case HeaderValueProtoReq:
		protoRequest, err := getProtoInferRequest(job)
		if err != nil {
			return err
		}
		return iw.grpcRequest(ctxWithTimeout, job, protoRequest)
	case HeaderValueProtoRes:
		protoRequest, err := getProtoRequestAssumingResponse(job.msg.Value)
		if err != nil {
			return err
		}
		return iw.grpcRequest(ctxWithTimeout, job, protoRequest)
	default:
		// Error strings are lowercase per Go convention (staticcheck ST1005).
		return fmt.Errorf("header %s with unknown type %s", HeaderKeyType, typeValue)
	}
}

// existsKafkaHeader reports whether a header with the given key and value is
// already present in headers.
func existsKafkaHeader(headers []kafka.Header, key string, val string) bool {
	for i := range headers {
		h := &headers[i]
		if h.Key == key && string(h.Value) == val {
			return true
		}
	}
	return false
}

// produce publishes payload b to the given Kafka topic, preserving the key
// and partition of the originating message. Headers from the incoming
// message are carried over, extra headers are appended when not already
// present, and an error-topic marker header is added for error payloads.
// When a schema registry client is configured, successful (non-error)
// responses are re-serialised with a schema id, falling back to the raw
// payload on failure. Delivery is confirmed asynchronously; the returned
// error is non-nil only when enqueueing the message fails.
func (iw *InferWorker) produce(
	ctx context.Context,
	job *InferWork,
	topic string,
	b []byte,
	errorTopic bool,
	headers map[string][]string,
) error {
	logger := iw.logger.WithField("func", "produce")

	kafkaHeaders := job.msg.Headers
	if errorTopic {
		kafkaHeaders = append(kafkaHeaders, kafka.Header{Key: kafka2.TopicErrorHeader, Value: []byte(job.modelName)})
	}

	for k, vs := range headers {
		for _, v := range vs {
			if !existsKafkaHeader(kafkaHeaders, k, v) {
				logger.Debugf("Adding header to kafka response %s:%s", k, v)
				kafkaHeaders = append(kafkaHeaders, kafka.Header{Key: k, Value: []byte(v)})
			}
		}
	}

	if logger.Logger.IsLevelEnabled(log.DebugLevel) {
		for _, h := range kafkaHeaders {
			logger.Debugf("Adding kafka header for topic %s %s:%s", topic, h.Key, string(h.Value))
		}
	}
	logger.Debugf("Produce response to topic %s on partition %d", topic, job.msg.TopicPartition.Partition)

	if iw.schemaRegistryClient != nil && !errorTopic {
		payloadWithSchemaID, err := iw.serializeModelInferRespWithSchemaRegistry(topic, b)
		if err != nil {
			// Best-effort: fall back to sending the raw payload.
			logger.Warnf("Failed to serialize model inference response with a schema id on topic %s "+
				"defaulting to sending without schema id with err: %v", topic, err)
		} else {
			b = payloadWithSchemaID
		}
	}

	msg := &kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: job.msg.TopicPartition.Partition},
		Key:            job.msg.Key,
		Value:          b,
		Headers:        kafkaHeaders,
	}

	ctx, span := iw.tracer.Start(ctx, "Produce")
	requestId := pipeline.GetRequestIdFromKafkaHeaders(kafkaHeaders)
	if requestId == "" {
		logger.Warnf("Missing request id in Kafka headers for key %s", string(job.msg.Key))
	}
	span.SetAttributes(attribute.String(util.RequestIdHeader, requestId))
	carrierOut := splunkkafka.NewMessageCarrier(msg)
	otel.GetTextMapPropagator().Inject(ctx, carrierOut)

	deliveryChan := make(chan kafka.Event)
	err := iw.consumer.Produce(msg, deliveryChan)
	if err != nil {
		iw.logger.WithError(err).Errorf("Failed to produce response for model %s", topic)
		return err
	}
	// Wait for the delivery report without blocking the worker.
	go func() {
		e := <-deliveryChan
		span.End()
		// The delivery channel may carry events other than *kafka.Message
		// (e.g. kafka.Error); an unchecked type assertion here would panic.
		switch ev := e.(type) {
		case *kafka.Message:
			if ev.TopicPartition.Error != nil {
				iw.logger.WithError(ev.TopicPartition.Error).Errorf("Failed to produce event for model %s", topic)
			}
		case kafka.Error:
			iw.logger.Errorf("Failed to produce event for model %s: %s", topic, ev.Error())
		default:
			iw.logger.Errorf("Unexpected delivery event type %T for model %s", e, topic)
		}
		close(deliveryChan)
	}()

	return nil
}

// restRequest sends the job payload to the model's v2 REST infer endpoint and
// produces the response body (or any error) back onto Kafka. When
// maybeConvert is true the payload may first be converted from a chained v2
// response into a request. All request failures are routed to the model
// error topic; the returned error is non-nil only when producing to Kafka
// itself fails.
func (iw *InferWorker) restRequest(ctx context.Context, job *InferWork, maybeConvert bool) error {
	logger := iw.logger.WithField("func", "restRequest")

	restUrl := getRestUrl(
		iw.consumer.tlsClientOptions.TLS,
		iw.consumer.consumerConfig.InferenceServerConfig.Host,
		iw.consumer.consumerConfig.InferenceServerConfig.HttpPort,
		job.modelName,
	)

	logger.Debugf("REST request to %s for %s", restUrl.String(), job.modelName)

	data := job.msg.Value
	if maybeConvert {
		data = maybeChainRest(job.msg.Value)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, restUrl.String(), bytes.NewBuffer(data))
	if err != nil {
		return iw.produce(ctx, job, iw.topicNamer.GetModelErrorTopic(), []byte(err.Error()), true, nil)
	}

	req.Header.Set("Content-Type", "application/json")
	req.Header.Set(util.SeldonModelHeader, job.modelName)
	if reqId, ok := job.headers[util.RequestIdHeader]; ok {
		req.Header[util.RequestIdHeader] = []string{reqId}
	}

	response, err := iw.httpClient.Do(req)
	if err != nil {
		return iw.produce(ctx, job, iw.topicNamer.GetModelErrorTopic(), []byte(err.Error()), true, nil)
	}
	// Close via defer so the body is released on every path. The previous
	// version leaked the body when io.ReadAll failed, and produced a spurious
	// error-topic message for a successful inference whose Close() errored.
	defer response.Body.Close()

	b, err := io.ReadAll(response.Body)
	if err != nil {
		return iw.produce(ctx, job, iw.topicNamer.GetModelErrorTopic(), []byte(err.Error()), true, nil)
	}

	iw.logger.Infof("v2 server response: %s", b)

	if response.StatusCode != http.StatusOK {
		logger.Warnf("Failed infer request with status code %d and payload %s", response.StatusCode, string(b))
		return iw.produce(ctx, job, iw.topicNamer.GetModelErrorTopic(), b, true, nil)
	}

	err = iw.produce(
		ctx,
		job,
		iw.topicNamer.GetModelTopicOutputs(job.modelName),
		b,
		false,
		extractHeadersHttp(response.Header),
	)
	if err != nil {
		logger.WithError(err).Errorf("Failed infer request iw.produce")
		return iw.produce(ctx, job, iw.topicNamer.GetModelErrorTopic(), []byte(err.Error()), true, nil)
	}
	return nil
}

// addMetadataToOutgoingContext copies all external request headers into the
// outgoing gRPC metadata and appends the seldon model header for routing.
func addMetadataToOutgoingContext(ctx context.Context, job *InferWork, logger log.FieldLogger) context.Context {
	for key, value := range job.headers {
		// We don't want to send x-seldon-route as this will confuse envoy
		if key == util.SeldonRouteHeader || !strings.HasPrefix(key, util.ExternalHeaderPrefix) {
			continue
		}
		logger.Debugf("Adding outgoing ctx metadata %s:%s", key, value)
		ctx = metadata.AppendToOutgoingContext(ctx, key, value)
	}
	return metadata.AppendToOutgoingContext(ctx, util.SeldonModelHeader, job.modelName)
}

// grpcRequest performs a ModelInfer gRPC call for the job and produces the
// proto-encoded response (or any error) back onto Kafka. The returned error
// is non-nil only when producing to Kafka itself fails.
func (iw *InferWorker) grpcRequest(ctx context.Context, job *InferWork, req *v2.ModelInferRequest) error {
	logger := iw.logger.WithField("func", "grpcRequest")
	logger.Debugf("gRPC request for %s", job.modelName)

	// Update req with correct modelName
	req.ModelName = job.modelName
	req.ModelVersion = fmt.Sprintf("%d", util.GetPinnedModelVersion())

	ctx = addMetadataToOutgoingContext(ctx, job, logger)

	// Capture response header/trailer metadata so it can be forwarded as
	// Kafka headers on the output topic.
	var header, trailer metadata.MD
	callOpts := append(iw.callOptions, grpc.Header(&header), grpc.Trailer(&trailer))

	resp, err := iw.grpcClient.ModelInfer(ctx, req, callOpts...)
	if err != nil {
		logger.WithError(err).Warnf("Failed infer request")
		return iw.produce(ctx, job, iw.topicNamer.GetModelErrorTopic(), []byte(err.Error()), true, nil)
	}

	b, err := proto.Marshal(resp)
	if err != nil {
		logger.WithError(err).Errorf("Failed to proto.Marshal")
		return iw.produce(ctx, job, iw.topicNamer.GetModelErrorTopic(), []byte(err.Error()), true, nil)
	}

	err = iw.produce(
		ctx,
		job,
		iw.topicNamer.GetModelTopicOutputs(job.modelName),
		b,
		false,
		extractHeadersGrpc(header, trailer),
	)
	if err != nil {
		logger.WithError(err).Errorf("Failed infer request iw.produce")
		return iw.produce(ctx, job, iw.topicNamer.GetModelErrorTopic(), []byte(err.Error()), true, nil)
	}
	return nil
}

// serializeModelInferRespWithSchemaRegistry re-encodes a proto
// ModelInferResponse payload through the schema registry serialiser so the
// produced Kafka message carries a schema id in its wire format. It returns
// the serialised bytes or an error (callers fall back to the raw payload).
func (iw *InferWorker) serializeModelInferRespWithSchemaRegistry(topic string, payload []byte) ([]byte, error) {
	logger := iw.logger.WithField("func", "serializeModelInferRespWithSchemaRegistry")

	tracePayloadBytes(logger, payload, "before schema serialisation")

	v2Res := &inference_schema.ModelInferResponse{}
	if err := proto.Unmarshal(payload, v2Res); err != nil {
		return nil, fmt.Errorf("failed to unmarshal response to dataplane model: %w", err)
	}

	schemaConfig := protobuf.NewSerializerConfig()
	schemaConfig.NormalizeSchemas = true

	ser, err := protobuf.NewSerializer(iw.schemaRegistryClient, serde.ValueSerde, schemaConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to obtain a serialiser: %w", err)
	}

	serializedPayload, err := ser.Serialize(topic, v2Res)
	if err != nil {
		return nil, fmt.Errorf("failed to serialise response to dataplane model with schema id: %w", err)
	}

	// Bug fix: the previous version re-traced the pre-serialisation payload
	// here (with "before" wording); trace the serialised bytes instead.
	tracePayloadBytes(logger, serializedPayload, "after schema serialisation")

	return serializedPayload, nil
}

// tracePayloadBytes logs the first and last 10 bytes of a payload at trace
// level to help debug schema-registry wire framing; no-op for short payloads.
func tracePayloadBytes(logger log.FieldLogger, payload []byte, stage string) {
	if len(payload) <= 10 {
		return
	}
	logger.Tracef("first 10 bytes %s", stage)
	for _, b := range payload[:10] {
		logger.Tracef("%02x", b)
	}
	logger.Tracef("last 10 bytes %s", stage)
	for _, b := range payload[len(payload)-10:] {
		logger.Tracef("%02x", b)
	}
}

// this is redundant code but is kept there to avoid circular dependencies
// todo: refactor tracing pkg in general and remove this
//
// createBaseContextFromKafkaMsg extracts any trace context propagated in the
// Kafka message headers into a fresh background context. Callers should add
// timeout, cancellation, etc. to the returned context as they see fit.
func createBaseContextFromKafkaMsg(msg *kafka.Message) context.Context {
	carrier := splunkkafka.NewMessageCarrier(msg)
	return otel.GetTextMapPropagator().Extract(context.Background(), carrier)
}
