/*
Copyright (c) 2024 Seldon Technologies Ltd.

Use of this software is governed by
(1) the license included in the LICENSE file or
(2) if the license included in the LICENSE file is the Business Source License 1.1,
the Change License after the Change Date as each is defined in accordance with the LICENSE file.
*/

package pipeline

import (
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"time"

	"github.com/gorilla/mux"
	log "github.com/sirupsen/logrus"
	"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"

	"github.com/seldonio/seldon-core/scheduler/v2/pkg/kafka/pipeline/status"
	"github.com/seldonio/seldon-core/scheduler/v2/pkg/metrics"
	"github.com/seldonio/seldon-core/scheduler/v2/pkg/util"
)

const (
	ResourceNameVariable = "model"
	v2ModelPathPrefix    = "/v2/models/"
	v2PipelinePathPrefix = "/v2/pipelines/"
	healthCheckPath      = "/ready"
)

// GatewayHttpServer exposes the pipeline gateway over HTTP: v2 infer and
// ready endpoints for both models and pipelines, plus a health check route.
// Inference requests are forwarded to Kafka via the configured
// PipelineInferer; readiness is answered via the PipelineReadyChecker.
type GatewayHttpServer struct {
	port                 int
	router               *mux.Router
	server               *http.Server
	logger               log.FieldLogger
	gateway              PipelineInferer
	metrics              metrics.PipelineMetricsHandler
	tlsOptions           *util.TLSOptions
	pipelineReadyChecker status.PipelineReadyChecker
}

// TLSDetails describes where a TLS certificate and its key are mounted on
// disk.
// NOTE(review): not referenced anywhere in this file — presumably consumed
// elsewhere in the package; confirm before removing.
type TLSDetails struct {
	CertMountPath string
	CertFilename  string
	KeyFilename   string
}

// NewGatewayHttpServer builds a GatewayHttpServer for the given port, wiring
// in the inference gateway, metrics handler, TLS options and pipeline
// readiness checker. Routes are registered later, when Start is called.
func NewGatewayHttpServer(port int, logger log.FieldLogger,
	gateway PipelineInferer,
	metrics metrics.PipelineMetricsHandler,
	tlsOptions *util.TLSOptions,
	pipelineReadyChecker status.PipelineReadyChecker) *GatewayHttpServer {
	srv := GatewayHttpServer{
		port:                 port,
		router:               mux.NewRouter(),
		logger:               logger.WithField("source", "GatewayHttpServer"),
		gateway:              gateway,
		metrics:              metrics,
		tlsOptions:           tlsOptions,
		pipelineReadyChecker: pipelineReadyChecker,
	}
	return &srv
}

// Stop gracefully shuts down the HTTP server, giving in-flight requests up
// to the control-plane timeout to complete before returning.
func (g *GatewayHttpServer) Stop() error {
	shutdownCtx, release := context.WithTimeout(context.Background(), util.ServerControlPlaneTimeout)
	defer release()
	return g.server.Shutdown(shutdownCtx)
}

// Start registers all routes and begins serving HTTP (or HTTPS, depending on
// the TLS options) on the configured port. It blocks until the server stops
// and returns the error from Serve.
func (g *GatewayHttpServer) Start() error {
	logger := g.logger.WithField("func", "Start")
	logger.Infof("Starting http server on port %d", g.port)
	g.setupRoutes()
	g.server = &http.Server{
		Handler:     g.router,
		IdleTimeout: 65 * time.Second,
		// Bound the time spent reading request headers to protect against
		// slow-client (Slowloris) attacks. Deliberately no ReadTimeout /
		// WriteTimeout: pipeline inference requests may be long-running.
		ReadHeaderTimeout: time.Minute,
	}
	lis := g.createListener()
	return g.server.Serve(lis)
}

// createListener opens the TCP listener on the configured port, wrapped in
// TLS when enabled via the server's TLS options. It terminates the process
// (Fatalf) if the listener cannot be created.
func (g *GatewayHttpServer) createListener() net.Listener {
	var lis net.Listener
	var err error
	if g.tlsOptions.TLS {
		g.logger.Infof("Creating TLS listener on port %d", g.port)
		lis, err = tls.Listen("tcp", fmt.Sprintf(":%d", g.port), g.tlsOptions.Cert.CreateServerTLSConfig())
	} else {
		g.logger.Infof("Creating non-TLS listener port %d", g.port)
		lis, err = net.Listen("tcp", fmt.Sprintf(":%d", g.port))
	}
	if err != nil {
		// Use the server's field logger (not the package-level logrus logger,
		// as before) so the fatal message carries the same structured context
		// as every other log line from this type. Fatalf still exits.
		g.logger.Fatalf("failed to create listener: %v", err)
	}
	return lis
}

// setupRoutes installs the CORS and OpenTelemetry middleware and registers
// the v2 infer/ready endpoints for both models and pipelines, plus the
// health check route.
func (g *GatewayHttpServer) setupRoutes() {
	// TODO we seem to always enforce tracing middleware even if tracing is not enabled via configmap?? needless latency
	g.router.Use(mux.CORSMethodMiddleware(g.router))
	g.router.Use(otelmux.Middleware("pipelinegateway"))

	resource := "{" + ResourceNameVariable + "}"
	routes := []struct {
		path    string
		handler http.HandlerFunc
	}{
		{v2ModelPathPrefix + resource + "/infer", g.inferModel},
		{v2PipelinePathPrefix + resource + "/infer", g.inferPipeline},
		{v2ModelPathPrefix + resource + "/ready", g.pipelineReadyFromModelPath},
		{v2PipelinePathPrefix + resource + "/ready", g.pipelineReadyFromPipelinePath},
	}
	for _, r := range routes {
		g.router.NewRoute().Path(r.path).HandlerFunc(r.handler)
	}
	g.setupHealthRoute()
}

// setupHealthRoute registers the readiness-probe endpoint.
func (g *GatewayHttpServer) setupHealthRoute() {
	g.router.Path(healthCheckPath).HandlerFunc(g.healthCheck)
}

// HealthPath returns the URL path of the health check endpoint, so callers
// (e.g. probe configuration) need not hard-code the constant.
func (g *GatewayHttpServer) HealthPath() string {
	return healthCheckPath
}

// getRequestId returns the request ID carried in the request headers, or
// generates a fresh one when the header is absent. The header map is indexed
// directly (key is already in canonical form) so that a present-but-empty
// header value is returned as-is rather than replaced.
func (g *GatewayHttpServer) getRequestId(req *http.Request) string {
	if ids, ok := req.Header[util.RequestIdHeaderCanonical]; ok && len(ids) > 0 {
		return ids[0]
	}
	g.logger.Debug("Failed to find request ID - will generate one")
	return util.CreateRequestId()
}

// infer reads the HTTP request body, converts it to a V2 payload,
// round-trips it synchronously through Kafka via the gateway and writes the
// response (or upstream error payload) back to the client. Per-request
// latency metrics are recorded asynchronously.
func (g *GatewayHttpServer) infer(w http.ResponseWriter, req *http.Request, resourceName string, isModel bool) {
	logger := g.logger.WithField("func", "infer")
	startTime := time.Now()
	data, err := io.ReadAll(req.Body)
	if err != nil {
		// NOTE(review): body-read failures are neither logged nor recorded
		// in metrics — confirm this is intentional.
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	// Convert the raw body into V2 request bytes for publishing to Kafka.
	dataProto, err := ConvertRequestToV2Bytes(data, "", "")
	if err != nil {
		logger.WithError(err).Errorf("Failed to convert bytes to v2 request for resource %s", resourceName)
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	// Synchronous call: publishes the request and waits for the matching
	// Kafka response.
	kafkaRequest, err := g.gateway.Infer(req.Context(), resourceName, isModel, dataProto, convertHttpHeadersToKafkaHeaders(req.Header), g.getRequestId(req))
	// Elapsed time is measured from function entry (includes body read and
	// conversion as well as the Kafka round trip).
	elapsedTime := time.Since(startTime).Seconds()

	if err != nil {
		logger.WithError(err).Error("Failed to call infer")
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	// Propagate the request ID and the Kafka response headers; this must
	// happen before any WriteHeader/Write call, after which header changes
	// are ignored.
	w.Header().Set(util.RequestIdHeader, kafkaRequest.key)
	for k, vals := range convertKafkaHeadersToHttpHeaders(kafkaRequest.headers) {
		for _, val := range vals {
			w.Header().Add(k, val)
		}
	}

	// An upstream error carried in the Kafka response is surfaced to the
	// client as a 400 with a structured error payload.
	if kafkaRequest.err != nil {
		logger.WithField("resp_body", kafkaRequest.response).Error("Got upstream error after publishing req")
		w.WriteHeader(http.StatusBadRequest)
		_, err = w.Write(createResponseErrorPayload(kafkaRequest.err, kafkaRequest.response))
		if err != nil {
			logger.WithError(err).Error("Failed to write error payload")
		}
		return
	}

	resJson, err := ConvertV2ResponseBytesToJson(kafkaRequest.response)
	if err != nil {
		logger.WithError(err).Errorf("Failed to convert v2 response to json for resource %s", resourceName)
		// Metrics are recorded off the request path to avoid adding latency.
		go g.metrics.AddPipelineInferMetrics(resourceName, metrics.MethodTypeRest, elapsedTime, metrics.HttpCodeToString(http.StatusInternalServerError))
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	_, err = w.Write(resJson)
	if err != nil {
		// NOTE(review): WriteHeader after a (partially) successful Write is a
		// no-op — the client has already received an implicit 200 by now.
		// Also no metrics are recorded on this path.
		w.WriteHeader(http.StatusInternalServerError)
	} else {
		go g.metrics.AddPipelineInferMetrics(resourceName, metrics.MethodTypeRest, elapsedTime, metrics.HttpCodeToString(http.StatusOK))
	}
}

// getResourceFromHeaders resolves the target resource (model or pipeline)
// from the Seldon request headers. The internal model header, when present,
// takes precedence; it may carry multiple values due to shadow/mirror
// processing, in which case the last value wins.
func getResourceFromHeaders(req *http.Request, logger log.FieldLogger) (string, bool, error) {
	modelHeader := req.Header.Get(util.SeldonModelHeader)
	internalHeaders := req.Header.Values(util.SeldonInternalModelHeader)
	logger.Debugf("Seldon model header %s and seldon internal model header %s", modelHeader, internalHeaders)
	if n := len(internalHeaders); n > 0 {
		// Prefer the most recently appended internal header value.
		return createResourceNameFromHeader(internalHeaders[n-1])
	}
	return createResourceNameFromHeader(modelHeader)
}

// inferModel handles POST /v2/models/{model}/infer. The target resource is
// resolved from the Seldon headers, not the URL path.
func (g *GatewayHttpServer) inferModel(w http.ResponseWriter, req *http.Request) {
	g.traceReqID(req)

	logger := g.logger.WithField("func", "inferModel")
	resourceName, isModel, err := getResourceFromHeaders(req, logger)
	if err != nil {
		// Fix: the original called logger.WithError(err) without a terminal
		// Error(...) call, building a log entry but never emitting it — the
		// error was silently dropped.
		logger.WithError(err).Error("Failed to get resource name from headers")
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	g.infer(w, req, resourceName, isModel)
}

// traceReqID attaches the incoming request ID header (when present) to the
// current trace span, so requests can be correlated across services.
func (g *GatewayHttpServer) traceReqID(req *http.Request) {
	requestId := req.Header.Get(util.RequestIdHeader)
	if requestId == "" {
		return
	}
	trace.SpanFromContext(req.Context()).SetAttributes(
		attribute.String(util.RequestIdHeader, requestId),
	)
}

// inferPipeline handles POST /v2/pipelines/{model}/infer. The target
// resource is resolved from the Seldon headers, not the URL path.
func (g *GatewayHttpServer) inferPipeline(w http.ResponseWriter, req *http.Request) {
	g.traceReqID(req)

	logger := g.logger.WithField("func", "inferPipeline")
	resourceName, isModel, err := getResourceFromHeaders(req, logger)
	if err != nil {
		// Fix: include the underlying error in the log entry; it was
		// previously discarded, making the 400 hard to diagnose.
		logger.WithError(err).Error("No header found for pipeline identification")
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	g.infer(w, req, resourceName, isModel)
}

// pipelineReady reports pipeline readiness: 200 when ready, 424 (failed
// dependency) when not ready, 404 when the pipeline is unknown and 500 on
// any other checker error.
func (g *GatewayHttpServer) pipelineReady(w http.ResponseWriter, req *http.Request, resourceName string) {
	logger := g.logger.WithField("func", "pipelineReady")
	ready, err := g.pipelineReadyChecker.CheckPipelineReady(req.Context(), resourceName, g.getRequestId(req))
	switch {
	case errors.Is(err, status.PipelineNotFoundErr):
		w.WriteHeader(http.StatusNotFound)
	case err != nil:
		// Fix: "readines" typo in the log message.
		logger.WithError(err).Errorf("Failed to get pipeline readiness for pipeline %s", resourceName)
		w.WriteHeader(http.StatusInternalServerError)
	case ready:
		w.WriteHeader(http.StatusOK)
	default:
		w.WriteHeader(http.StatusFailedDependency)
	}
}

// healthCheck answers the /ready probe. It unconditionally reports 200 OK:
// the server is considered healthy as long as it can serve requests.
func (g *GatewayHttpServer) healthCheck(w http.ResponseWriter, _ *http.Request) {
	w.WriteHeader(http.StatusOK)
}

// pipelineReadyFromPipelinePath handles /v2/pipelines/{model}/ready, taking
// the pipeline name from the URL path variable.
func (g *GatewayHttpServer) pipelineReadyFromPipelinePath(w http.ResponseWriter, req *http.Request) {
	g.pipelineReady(w, req, mux.Vars(req)[ResourceNameVariable])
}

// pipelineReadyFromModelPath handles /v2/models/{model}/ready. The target is
// resolved from the Seldon headers; calls that resolve to a model (rather
// than a pipeline) are rejected, since this gateway only serves pipelines.
func (g *GatewayHttpServer) pipelineReadyFromModelPath(w http.ResponseWriter, req *http.Request) {
	// Fix: the "func" log field previously said "inferModel" (copy-paste from
	// the infer handler), mislabeling every log line from this handler.
	logger := g.logger.WithField("func", "pipelineReadyFromModelPath")
	resourceName, isModel, err := getResourceFromHeaders(req, logger)
	if err != nil {
		logger.WithError(err).Error("Failed to create resource name from header")
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	if isModel {
		// Errorf -> Error: the message contains no format verbs.
		logger.Error("Model ready call to pipeline gateway. Will ignore")
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	g.pipelineReady(w, req, resourceName)
}
