package kafka

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/wundergraph/cosmo/router/pkg/metric"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/wundergraph/cosmo/router/pkg/pubsub/datasource"
	"go.uber.org/zap"
)

var (
	// errClientClosed signals that polling stopped because the underlying
	// franz-go client was closed (e.g. during shutdown).
	errClientClosed = errors.New("client closed")
)

// Ensure ProviderAdapter implements Adapter
var _ datasource.Adapter = (*ProviderAdapter)(nil)

// Stream operation names reported to the stream metric store.
const (
	kafkaReceive = "receive"
	kafkaProduce = "produce"
)

// ProviderAdapter is a Kafka pubsub implementation.
// It uses the franz-go Kafka client to consume and produce messages.
// The pubsub is stateless and does not store any messages.
// It uses a single write client to produce messages and a client per topic to consume messages.
// Each client polls the Kafka topic for new records and updates the subscriptions with the new data.
type ProviderAdapter struct {
	ctx               context.Context          // adapter-lifetime context; canceling it stops all pollers
	opts              []kgo.Opt                // base client options shared by the write client and every consumer client
	logger            *zap.Logger              // tagged with pubsub=kafka in NewProviderAdapter
	writeClient       *kgo.Client              // dedicated producer client, created in Startup; nil until then
	closeWg           sync.WaitGroup           // tracks running poller goroutines so Shutdown can wait for them
	cancel            context.CancelFunc       // cancels ctx; called from Shutdown
	streamMetricStore metric.StreamMetricStore // receives consume/produce events; never nil (noop fallback)
}

// PollerOpts carries per-subscription settings passed to topicPoller.
type PollerOpts struct {
	// providerId identifies the Kafka provider in emitted stream metrics.
	providerId string
}

// topicPoller polls the Kafka topics the client is subscribed to and forwards
// every fetched record to the subscription event updater, emitting a consume
// metric per record. It loops until the adapter context (p.ctx) or the
// subscription context (ctx) is canceled, the client is closed, or an
// unrecoverable fetch error occurs; the returned error reports which of these
// ended the loop (it never returns nil).
func (p *ProviderAdapter) topicPoller(ctx context.Context, client *kgo.Client, updater datasource.SubscriptionEventUpdater, pollerOpts PollerOpts) error {
	for {
		select {
		case <-p.ctx.Done(): // Close the poller if the application context was canceled
			return p.ctx.Err()
		case <-ctx.Done(): // Close the poller if the subscription context was canceled
			return ctx.Err()

		default:
			// Try to fetch max records from any subscribed topics.
			// NOTE: polling uses the adapter context p.ctx (not the
			// subscription ctx); a canceled subscription is only noticed on
			// the next loop iteration via the select above.
			fetches := client.PollRecords(p.ctx, 10_000)
			if fetches.IsClientClosed() {
				return errClientClosed
			}

			if errs := fetches.Errors(); len(errs) > 0 {

				for _, fetchError := range errs {

					// If the context was canceled, the error is wrapped in a fetch error
					if errors.Is(fetchError.Err, context.Canceled) {
						return fetchError.Err
					}

					var kErr *kerr.Error
					if errors.As(fetchError.Err, &kErr) {
						if !kErr.Retriable {
							p.logger.Error("unrecoverable fetch error",
								zap.Error(fetchError.Err),
								zap.String("topic", fetchError.Topic),
							)

							// If the error is not recoverable, return it and abort the poller
							return fetchError.Err
						}
						// NOTE(review): retriable Kafka errors fall through
						// without logging; presumably the client retries on
						// the next poll — confirm this is intentional.
					} else {
						// Non-Kafka fetch errors are logged but do not stop the poller.
						p.logger.Error("fetch error", zap.Error(fetchError.Err), zap.String("topic", fetchError.Topic))
					}
				}
			}

			// Deliver every fetched record to the updater.
			iter := fetches.RecordIter()
			for !iter.Done() {
				r := iter.Next()

				p.logger.Debug("subscription update", zap.String("topic", r.Topic), zap.ByteString("data", r.Value))

				// Flatten the Kafka record headers into a map for the event.
				headers := make(map[string][]byte)
				for _, header := range r.Headers {
					headers[header.Key] = header.Value
				}

				// Record one consume metric per delivered record.
				p.streamMetricStore.Consume(p.ctx, metric.StreamsEvent{
					ProviderId:          pollerOpts.providerId,
					StreamOperationName: kafkaReceive,
					ProviderType:        metric.ProviderTypeKafka,
					DestinationName:     r.Topic,
				})

				updater.Update([]datasource.StreamEvent{
					&Event{
						evt: &MutableEvent{
							Data:    r.Value,
							Headers: headers,
							Key:     r.Key,
						},
					},
				})
			}
		}
	}
}

// Subscribe subscribes to the given topics and updates the subscription updater.
// The engine already deduplicates subscriptions with the same topics, stream configuration, extensions, headers, etc.
// A dedicated consumer client is created per subscription and a poller goroutine
// (tracked by closeWg) forwards records until either context is canceled.
func (p *ProviderAdapter) Subscribe(ctx context.Context, conf datasource.SubscriptionEventConfiguration, updater datasource.SubscriptionEventUpdater) error {
	subConf, ok := conf.(*SubscriptionEventConfiguration)
	if !ok {
		return datasource.NewError("invalid event type for Kafka adapter", nil)
	}

	log := p.logger.With(
		zap.String("provider_id", conf.ProviderID()),
		zap.String("method", "subscribe"),
		zap.Strings("topics", subConf.Topics),
	)

	// Create a dedicated consumer client for this subscription's topics
	client, err := kgo.NewClient(append(p.opts,
		kgo.ConsumeTopics(subConf.Topics...),
		// We want to consume the events produced after the first subscription was created
		// Messages are shared among all subscriptions, therefore old events are not redelivered
		// This replicates a stateless publish-subscribe model
		kgo.ConsumeResetOffset(kgo.NewOffset().AfterMilli(time.Now().UnixMilli())),
		// For observability, the client ID names this consumer after its topics
		kgo.ClientID(fmt.Sprintf("cosmo.router.consumer.%s", strings.Join(subConf.Topics, "-"))),
		// FIXME: the client id should have some unique identifier, like in nats
		// What if we have multiple subscriptions for the same topics?
		// What if we have more router instances?
	)...)
	if err != nil {
		log.Error("failed to create client", zap.Error(err))
		return err
	}

	p.closeWg.Add(1)

	go func() {
		defer p.closeWg.Done()
		// Close the consumer client when the poller exits (context canceled or
		// unrecoverable error); otherwise its connections and internal
		// goroutines would leak for every ended subscription.
		defer client.Close()

		err := p.topicPoller(ctx, client, updater, PollerOpts{providerId: conf.ProviderID()})
		if err != nil {
			if errors.Is(err, errClientClosed) || errors.Is(err, context.Canceled) {
				// Expected exits: client closed or context canceled.
				log.Debug("poller canceled", zap.Error(err))
			} else {
				log.Error(
					"poller error",
					zap.Error(err),
					zap.String("provider_id", conf.ProviderID()),
					zap.String("provider_type", string(conf.ProviderType())),
					zap.String("field_name", conf.RootFieldName()),
				)
			}
		}
	}()

	return nil
}

// Publish publishes the given events to the Kafka topic in a non-blocking way.
// Publish errors are logged and returned as a pubsub error.
// The events are written with a dedicated write client.
// Although each produce call is asynchronous, Publish waits for all delivery
// callbacks before emitting metrics and returning, so the caller observes a
// per-batch success/failure result.
func (p *ProviderAdapter) Publish(ctx context.Context, conf datasource.PublishEventConfiguration, events []datasource.StreamEvent) error {
	pubConf, ok := conf.(*PublishEventConfiguration)
	if !ok {
		return datasource.NewError("invalid event type for Kafka adapter", nil)
	}

	log := p.logger.With(
		zap.String("provider_id", conf.ProviderID()),
		zap.String("method", "publish"),
		zap.String("topic", pubConf.Topic),
	)

	// Publish is only usable after Startup has created the write client.
	if p.writeClient == nil {
		return datasource.NewError("kafka write client not initialized", nil)
	}

	if len(events) == 0 {
		return nil
	}

	log.Debug("publish", zap.Int("event_count", len(events)))

	// One WaitGroup slot per event; each slot is released either by the
	// produce callback or directly when the event cast fails.
	var wg sync.WaitGroup
	wg.Add(len(events))

	// errs is appended to from the produce callbacks (which may run on other
	// goroutines), so access is guarded by errMutex.
	var errs []error
	var errMutex sync.Mutex

	for _, streamEvent := range events {
		evt, err := castToMutableEvent(streamEvent)
		if err != nil {
			// This event will never reach Produce, so release its slot here.
			wg.Done()
			errMutex.Lock()
			errs = append(errs, err)
			errMutex.Unlock()
			continue
		}

		// Convert the event's header map into Kafka record headers.
		headers := make([]kgo.RecordHeader, 0, len(evt.Headers))
		for key, value := range evt.Headers {
			headers = append(headers, kgo.RecordHeader{
				Key:   key,
				Value: value,
			})
		}

		// Asynchronous produce; the callback reports delivery success/failure.
		p.writeClient.Produce(ctx, &kgo.Record{
			Key:     evt.Key,
			Topic:   pubConf.Topic,
			Value:   evt.Data,
			Headers: headers,
		}, func(record *kgo.Record, err error) {
			defer wg.Done()
			if err != nil {
				errMutex.Lock()
				errs = append(errs, err)
				errMutex.Unlock()
			}
		})
	}

	// Block until every event has either failed the cast or completed its
	// produce callback; after this point errs is stable and needs no lock.
	wg.Wait()

	// Produce metrics for all failed and successfully published events
	successCount := len(events) - len(errs)
	for range successCount {
		p.streamMetricStore.Produce(ctx, metric.StreamsEvent{
			ProviderId:          pubConf.ProviderID(),
			StreamOperationName: kafkaProduce,
			ProviderType:        metric.ProviderTypeKafka,
			DestinationName:     pubConf.Topic,
		})
	}
	for range len(errs) {
		p.streamMetricStore.Produce(ctx, metric.StreamsEvent{
			ProviderId:          pubConf.ProviderID(),
			StreamOperationName: kafkaProduce,
			ProviderType:        metric.ProviderTypeKafka,
			ErrorType:           "publish_error",
			DestinationName:     pubConf.Topic,
		})
	}

	// Log all errors, if any, as a single entry and return error
	if len(errs) > 0 {
		combinedErr := errors.Join(errs...)
		log.Error("publish errors", zap.Error(combinedErr), zap.Int("failed_count", len(errs)), zap.Int("total_count", len(events)))
		return datasource.NewError(
			fmt.Sprintf("error publishing %d/%d events to Kafka topic %s", len(errs), len(events), pubConf.Topic), combinedErr,
		)
	}

	return nil
}

// Startup creates the dedicated write client used by Publish.
// It must be called before the first Publish; Subscribe does not depend on it.
func (p *ProviderAdapter) Startup(ctx context.Context) error {
	// For observability, the producer gets a fixed, recognizable client ID.
	client, err := kgo.NewClient(append(p.opts, kgo.ClientID("cosmo.router.producer"))...)
	if err != nil {
		return err
	}

	p.writeClient = client
	return nil
}

// Shutdown flushes and closes the write client (if it was created), cancels
// the adapter context to stop all pollers, and waits for them to finish.
// It returns a wrapped error if flushing the write client failed.
func (p *ProviderAdapter) Shutdown(ctx context.Context) error {
	var flushErr error

	// The write client only exists after Startup; skip flushing otherwise.
	if p.writeClient != nil {
		flushErr = p.writeClient.Flush(ctx)
		if flushErr != nil {
			p.logger.Error("flushing write client", zap.Error(flushErr))
		}
		p.writeClient.Close()
	}

	// Cancel the context to stop all pollers. This must happen even when the
	// write client was never created, otherwise subscription pollers would
	// keep running and the adapter context would leak.
	p.cancel()

	// Wait until all pollers are closed
	p.closeWg.Wait()

	if flushErr != nil {
		return fmt.Errorf("kafka pubsub shutdown: %w", flushErr)
	}

	return nil
}

// NewProviderAdapter builds a Kafka ProviderAdapter. A nil logger is replaced
// with a no-op logger, and a missing stream metric store falls back to a noop
// implementation so callers never have to nil-check either dependency.
// The derived context/cancel pair controls the lifetime of all pollers.
func NewProviderAdapter(ctx context.Context, logger *zap.Logger, opts []kgo.Opt, providerOpts datasource.ProviderOpts) (*ProviderAdapter, error) {
	adapterCtx, cancel := context.WithCancel(ctx)

	if logger == nil {
		logger = zap.NewNop()
	}

	// Default to a noop metric store when none was provided.
	store := providerOpts.StreamMetricStore
	if store == nil {
		store = metric.NewNoopStreamMetricStore()
	}

	return &ProviderAdapter{
		ctx:               adapterCtx,
		logger:            logger.With(zap.String("pubsub", "kafka")),
		opts:              opts,
		closeWg:           sync.WaitGroup{},
		cancel:            cancel,
		streamMetricStore: store,
	}, nil
}

// castToMutableEvent extracts the *MutableEvent backing a stream event.
// It accepts either the immutable wrapper (*Event) or a *MutableEvent
// directly; any other concrete type yields an error.
func castToMutableEvent(event datasource.StreamEvent) (*MutableEvent, error) {
	if mutable, ok := event.(*MutableEvent); ok {
		return mutable, nil
	}
	if wrapped, ok := event.(*Event); ok {
		return wrapped.evt, nil
	}
	return nil, errors.New("invalid event type for Kafka adapter")
}
