// Copyright 2022 Redpanda Data, Inc.
//
// Use of this software is governed by the Business Source License
// included in the file https://github.com/redpanda-data/redpanda/blob/dev/licenses/bsl.md
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0

package kafka

import (
	"log/slog"
	"net"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/twmb/franz-go/pkg/kgo"
)

var ( // interface checks to ensure we implement the hooks properly
	// Compile-time assertions: if *clientHooks ever stops satisfying one of
	// these franz-go hook interfaces, the build fails here instead of the
	// hook silently not firing at runtime.
	_ kgo.HookBrokerConnect    = (*clientHooks)(nil)
	_ kgo.HookBrokerDisconnect = (*clientHooks)(nil)
	_ kgo.HookBrokerWrite      = (*clientHooks)(nil)
	_ kgo.HookBrokerRead       = (*clientHooks)(nil)
)

// clientHooks implements the various hook interfaces from the franz-go (kafka) library. We can use these hooks to
// log additional information, collect Prometheus metrics and similar.
type clientHooks struct {
	logger *slog.Logger

	// Write-side counters, incremented in OnBrokerWrite.
	requestSentCount prometheus.Counter
	bytesSent        prometheus.Counter

	// Read-side counters, incremented in OnBrokerRead.
	requestsReceivedCount prometheus.Counter
	bytesReceived         prometheus.Counter

	// Connection metrics maintained by OnBrokerConnect/OnBrokerDisconnect;
	// openConnections carries one gauge per broker_id label.
	openConnections    *prometheus.GaugeVec
	connectionAttempts prometheus.Counter
}

var (
	// We may need to initialize client hooks with different
	// loggers multiple times, but we can only register the same
	// Prometheus metrics in the same registry once. Therefore,
	// we store these metrics at the package level per registry and initialize
	// them only once per registry.
	registryMetrics = make(map[prometheus.Registerer]*kafkaMetrics)

	// metricsInitMu guards registryMetrics. A plain Mutex suffices: the map
	// is only ever accessed under the exclusive lock in newClientHooks, and
	// there are no read-only (RLock) users, so an RWMutex would add
	// bookkeeping overhead without benefit.
	metricsInitMu sync.Mutex
)

// kafkaMetrics bundles the Prometheus collectors that are created and
// registered once per registry and then shared by every clientHooks
// instance built against that registry (see registryMetrics).
type kafkaMetrics struct {
	requestSent        prometheus.Counter
	bytesSent          prometheus.Counter
	requestsReceived   prometheus.Counter
	bytesReceived      prometheus.Counter
	openConnections    *prometheus.GaugeVec // labeled by broker_id
	connectionAttempts prometheus.Counter
}

// newClientHooks returns hook callbacks bound to the given logger and to the
// Prometheus metrics registered in the provided registry. Metrics are created
// and registered at most once per registry (guarded by metricsInitMu), so this
// function can be called repeatedly — e.g. with different loggers — without
// triggering duplicate-registration panics from MustRegister.
func newClientHooks(logger *slog.Logger, metricsNamespace string, registry prometheus.Registerer) *clientHooks {
	metricsInitMu.Lock()
	defer metricsInitMu.Unlock()

	metrics, exists := registryMetrics[registry]
	if !exists {
		// counter builds a counter in the "kafka" subsystem of the
		// configured namespace; the closure removes the CounterOpts
		// boilerplate otherwise repeated for every counter below and
		// ensures each metric gets Help text.
		counter := func(name, help string) prometheus.Counter {
			return prometheus.NewCounter(prometheus.CounterOpts{
				Namespace: metricsNamespace,
				Subsystem: "kafka",
				Name:      name,
				Help:      help,
			})
		}

		metrics = &kafkaMetrics{
			requestSent:      counter("requests_sent_total", "Total number of requests sent to Kafka brokers"),
			bytesSent:        counter("sent_bytes", "Total number of bytes sent to Kafka brokers"),
			requestsReceived: counter("requests_received_total", "Total number of responses received from Kafka brokers"),
			bytesReceived:    counter("received_bytes", "Total number of bytes received from Kafka brokers"),
			openConnections: prometheus.NewGaugeVec(prometheus.GaugeOpts{
				Namespace: metricsNamespace,
				Subsystem: "kafka",
				Name:      "open_connections",
				Help:      "Number of open connections to Kafka brokers",
			}, []string{"broker_id"}),
			connectionAttempts: counter("connection_attempts_total", "Total number of connection attempts to Kafka brokers"),
		}

		registry.MustRegister(
			metrics.requestSent,
			metrics.bytesSent,
			metrics.requestsReceived,
			metrics.bytesReceived,
			metrics.openConnections,
			metrics.connectionAttempts,
		)

		registryMetrics[registry] = metrics
	}

	return &clientHooks{
		logger: logger,

		requestSentCount:      metrics.requestSent,
		bytesSent:             metrics.bytesSent,
		requestsReceivedCount: metrics.requestsReceived,
		bytesReceived:         metrics.bytesReceived,
		openConnections:       metrics.openConnections,
		connectionAttempts:    metrics.connectionAttempts,
	}
}

// OnBrokerConnect is called when the client connects (or fails to connect) to
// any node of the target Kafka cluster. Every attempt — successful or not —
// increments the connection-attempts counter; only successful connects bump
// the per-broker open-connections gauge.
func (c clientHooks) OnBrokerConnect(meta kgo.BrokerMetadata, dialDur time.Duration, _ net.Conn, err error) {
	// Track all connection attempts (successful and failed).
	c.connectionAttempts.Inc()

	if err != nil {
		// Use the same "host" attribute key as the success path (and
		// OnBrokerDisconnect) so failures correlate in log queries, and
		// include the dial duration, which is just as useful on failure.
		c.logger.Debug("kafka connection failed",
			slog.String("host", meta.Host),
			slog.Duration("dial_duration", dialDur),
			slog.Any("error", err))
		return
	}
	c.openConnections.WithLabelValues(kgo.NodeName(meta.NodeID)).Inc()
	c.logger.Info("kafka connection succeeded",
		slog.String("host", meta.Host),
		slog.Duration("dial_duration", dialDur),
		slog.Int("node_id", int(meta.NodeID)))
}

// OnBrokerDisconnect is invoked when an established connection to any node of
// the target Kafka cluster is closed; it decrements the per-broker
// open-connections gauge and logs the event.
func (c clientHooks) OnBrokerDisconnect(meta kgo.BrokerMetadata, _ net.Conn) {
	brokerLabel := kgo.NodeName(meta.NodeID)
	c.openConnections.WithLabelValues(brokerLabel).Dec()

	c.logger.Debug("kafka broker disconnected",
		slog.String("host", meta.Host))
}

// OnBrokerRead runs after every read from a broker. It receives the broker
// metadata, the key of the response that was read, the number of bytes read
// (excluding any TLS overhead), how long the client waited before reading,
// how long the read took, and any error; here we only account for the
// response count and byte volume.
func (c clientHooks) OnBrokerRead(_ kgo.BrokerMetadata, _ int16, bytesRead int, _, _ time.Duration, _ error) {
	c.bytesReceived.Add(float64(bytesRead))
	c.requestsReceivedCount.Inc()
}

// OnBrokerWrite runs after every write to a broker. It receives the broker
// metadata, the key of the request that was written, the number of bytes
// written (excluding any TLS overhead), how long the request waited before
// being written, how long the write took, and any error; here we only
// account for the request count and byte volume.
func (c clientHooks) OnBrokerWrite(_ kgo.BrokerMetadata, _ int16, bytesWritten int, _, _ time.Duration, _ error) {
	c.bytesSent.Add(float64(bytesWritten))
	c.requestSentCount.Inc()
}
