//go:build kafka
// +build kafka

package connector

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/Shopify/sarama"
)

// KafkaSourceConnector reads messages from a Kafka topic and exposes them
// as RecordBatches via Read. All exported methods lock mu, so the type is
// safe for concurrent use.
type KafkaSourceConnector struct {
	// config is the raw configuration map passed to Initialize.
	config map[string]interface{}
	// consumer is the underlying sarama consumer, created in Start.
	consumer sarama.Consumer
	// partitionConsumers holds one consumer per partition of the topic.
	partitionConsumers []sarama.PartitionConsumer
	// metrics accumulates counters (records_total, records_read) and
	// timestamps (start_time, last_read_time); snapshot via GetMetrics.
	metrics map[string]interface{}
	// mu guards all mutable state in this struct.
	mu sync.RWMutex
	// started reports whether Start has completed successfully.
	started bool
	// topic is the Kafka topic to consume from (required config key "topic").
	topic string
	// brokers are the Kafka broker addresses (required config key "brokers").
	brokers []string
	// groupID is the optional consumer group ID (config key "group_id").
	groupID string
	// offset selects the initial offset: "oldest" or "newest" (the default).
	offset string
	// batchSize is the maximum number of records returned per Read (default 10).
	batchSize int
	// messages buffers consumed messages until Read drains them in batches.
	messages []*sarama.ConsumerMessage
}

// NewKafkaSourceConnector constructs a Kafka source connector with default
// settings: batch size 10, initial offset "newest", and an empty message buffer.
// Initialize must be called before Start.
func NewKafkaSourceConnector() SourceConnector {
	connector := &KafkaSourceConnector{
		metrics:  map[string]interface{}{},
		offset:   "newest",
		messages: []*sarama.ConsumerMessage{},
	}
	connector.batchSize = 10
	return connector
}

// Initialize validates and applies the connector configuration.
//
// Required keys:
//   - "topic":   string topic name
//   - "brokers": []interface{} of string broker addresses (non-empty)
//
// Optional keys:
//   - "group_id":   string consumer group ID
//   - "offset":     "oldest" or "newest" (default "newest")
//   - "batch_size": numeric batch size (default 10); accepts both int and
//     float64 so JSON-decoded and programmatic configs work
//
// Returns an error when a required key is missing or malformed.
func (c *KafkaSourceConnector) Initialize(config map[string]interface{}) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.config = config

	topic, ok := config["topic"].(string)
	if !ok || topic == "" {
		return fmt.Errorf("topic is required")
	}
	c.topic = topic

	rawBrokers, ok := config["brokers"].([]interface{})
	if !ok || len(rawBrokers) == 0 {
		return fmt.Errorf("brokers is required")
	}
	c.brokers = make([]string, len(rawBrokers))
	for i, broker := range rawBrokers {
		brokerStr, ok := broker.(string)
		if !ok {
			return fmt.Errorf("broker must be a string")
		}
		c.brokers[i] = brokerStr
	}

	if groupID, ok := config["group_id"].(string); ok {
		c.groupID = groupID
	}

	if offset, ok := config["offset"].(string); ok {
		c.offset = offset
	}

	// JSON decoding produces float64 for numbers; Go-literal configs use int.
	// The original only accepted float64, silently ignoring int values.
	switch batchSize := config["batch_size"].(type) {
	case float64:
		c.batchSize = int(batchSize)
	case int:
		c.batchSize = batchSize
	}

	// Seed metrics so later type-asserted increments never hit a nil entry.
	c.metrics["records_total"] = 0
	c.metrics["records_read"] = 0
	c.metrics["start_time"] = time.Now().Unix()

	return nil
}

// Start creates the Kafka consumer and one partition consumer per partition
// of the configured topic. On any failure it closes everything it created
// so far, so a failed Start never leaks consumers and leaves the connector
// in its pre-Start state. Returns an error if already started.
func (c *KafkaSourceConnector) Start(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.started {
		return fmt.Errorf("connector already started")
	}

	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true

	// Map the configured offset string to a sarama initial offset;
	// anything other than "oldest" falls back to newest.
	if c.offset == "oldest" {
		config.Consumer.Offsets.Initial = sarama.OffsetOldest
	} else {
		config.Consumer.Offsets.Initial = sarama.OffsetNewest
	}

	consumer, err := sarama.NewConsumer(c.brokers, config)
	if err != nil {
		return fmt.Errorf("failed to create consumer: %w", err)
	}

	partitions, err := consumer.Partitions(c.topic)
	if err != nil {
		// Don't leak the consumer on partial startup (original bug).
		consumer.Close()
		return fmt.Errorf("failed to get partitions: %w", err)
	}

	partitionConsumers := make([]sarama.PartitionConsumer, 0, len(partitions))
	for _, partition := range partitions {
		pc, err := consumer.ConsumePartition(c.topic, partition, config.Consumer.Offsets.Initial)
		if err != nil {
			// Unwind everything created so far before failing.
			for _, open := range partitionConsumers {
				open.Close()
			}
			consumer.Close()
			return fmt.Errorf("failed to create partition consumer: %w", err)
		}
		partitionConsumers = append(partitionConsumers, pc)
	}

	// Commit state only once every resource was created successfully.
	c.consumer = consumer
	c.partitionConsumers = partitionConsumers
	c.started = true

	return nil
}

// Stop closes every partition consumer and then the consumer itself.
// Unlike a fail-fast close, it always attempts to close ALL resources —
// an early return on the first close error would leak the remaining
// partition consumers and the consumer. The first error encountered is
// returned; the connector is marked stopped regardless so a retry loop
// cannot double-close.
func (c *KafkaSourceConnector) Stop(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.started {
		return fmt.Errorf("connector not started")
	}

	var firstErr error
	for _, pc := range c.partitionConsumers {
		if err := pc.Close(); err != nil && firstErr == nil {
			firstErr = fmt.Errorf("failed to close partition consumer: %w", err)
		}
	}
	if err := c.consumer.Close(); err != nil && firstErr == nil {
		firstErr = fmt.Errorf("failed to close consumer: %w", err)
	}

	c.started = false

	return firstErr
}

// Read returns the next batch of records from the source, or (nil, nil)
// when no messages are currently available. It first serves from the
// internal buffer; when the buffer is empty it performs one non-blocking
// poll of every partition consumer to refill it, then tries the buffer
// again. Returns an error if the connector is not started or a partition
// consumer reported a consume error.
func (c *KafkaSourceConnector) Read(ctx context.Context) (*RecordBatch, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.started {
		return nil, fmt.Errorf("connector not started")
	}

	// Serve from the buffer if it already holds messages.
	if batch := c.takeBatch(); batch != nil {
		return batch, nil
	}

	// Non-blocking poll of each partition consumer to refill the buffer;
	// at most one message is taken per partition per Read call.
	for _, pc := range c.partitionConsumers {
		select {
		case message := <-pc.Messages():
			c.messages = append(c.messages, message)
			c.metrics["records_total"] = c.metrics["records_total"].(int) + 1
		case err := <-pc.Errors():
			return nil, fmt.Errorf("failed to read message: %w", err)
		default:
			// No message available on this partition right now.
		}
	}

	if batch := c.takeBatch(); batch != nil {
		return batch, nil
	}

	// Nothing buffered and nothing polled.
	return nil, nil
}

// takeBatch removes up to batchSize messages from the front of the buffer,
// converts them to records, and updates the read metrics. It returns nil
// when the buffer is empty. The caller must hold c.mu.
func (c *KafkaSourceConnector) takeBatch() *RecordBatch {
	if len(c.messages) == 0 {
		return nil
	}

	batchSize := c.batchSize
	if batchSize > len(c.messages) {
		batchSize = len(c.messages)
	}

	messages := c.messages[:batchSize]
	c.messages = c.messages[batchSize:]

	records := make([]*Record, len(messages))
	for i, message := range messages {
		record := NewRecord(message.Key, message.Value)
		record.Timestamp = message.Timestamp
		record.Headers = make(map[string]string, len(message.Headers))
		for _, header := range message.Headers {
			record.Headers[string(header.Key)] = string(header.Value)
		}
		records[i] = record
	}

	c.metrics["records_read"] = c.metrics["records_read"].(int) + len(records)
	c.metrics["last_read_time"] = time.Now().Unix()

	return NewRecordBatch(records)
}

// Commit acknowledges the given offset. This is a no-op that always
// succeeds: offset management is left to the Kafka consumer itself, so
// the offset argument is ignored.
func (c *KafkaSourceConnector) Commit(ctx context.Context, offset interface{}) error {
	// Kafka consumer automatically commits offsets
	return nil
}

// GetMetrics returns a point-in-time snapshot of the connector metrics.
// A copy is returned so callers cannot mutate the connector's internal map.
func (c *KafkaSourceConnector) GetMetrics() map[string]interface{} {
	c.mu.RLock()
	defer c.mu.RUnlock()

	snapshot := make(map[string]interface{}, len(c.metrics))
	for key, value := range c.metrics {
		snapshot[key] = value
	}
	return snapshot
}

// KafkaSinkConnector writes RecordBatches to a Kafka topic through a
// synchronous producer, buffering messages and flushing whenever the
// buffer reaches batchSize. All exported methods lock mu, so the type is
// safe for concurrent use.
type KafkaSinkConnector struct {
	// config is the raw configuration map passed to Initialize.
	config map[string]interface{}
	// producer is the synchronous Kafka producer, created in Start.
	producer sarama.SyncProducer
	// metrics accumulates counters (records_total) and timestamps
	// (start_time, last_write_time); snapshot via GetMetrics.
	metrics map[string]interface{}
	// mu guards all mutable state in this struct.
	mu sync.RWMutex
	// started reports whether Start has completed successfully.
	started bool
	// topic is the Kafka topic to produce to (required config key "topic").
	topic string
	// brokers are the Kafka broker addresses (required config key "brokers").
	brokers []string
	// batchSize is the buffer threshold that triggers a flush (default 10).
	batchSize int
	// buffer holds pending producer messages until the next flush.
	buffer []*sarama.ProducerMessage
}

// NewKafkaSinkConnector constructs a Kafka sink connector with default
// settings: batch size 10 and an empty message buffer. Initialize must be
// called before Start.
func NewKafkaSinkConnector() SinkConnector {
	connector := &KafkaSinkConnector{
		metrics: map[string]interface{}{},
		buffer:  []*sarama.ProducerMessage{},
	}
	connector.batchSize = 10
	return connector
}

// Initialize validates and applies the connector configuration.
//
// Required keys:
//   - "topic":   string topic name
//   - "brokers": []interface{} of string broker addresses (non-empty)
//
// Optional keys:
//   - "batch_size": numeric flush threshold (default 10); accepts both int
//     and float64 so JSON-decoded and programmatic configs work
//
// Returns an error when a required key is missing or malformed.
func (c *KafkaSinkConnector) Initialize(config map[string]interface{}) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.config = config

	topic, ok := config["topic"].(string)
	if !ok || topic == "" {
		return fmt.Errorf("topic is required")
	}
	c.topic = topic

	rawBrokers, ok := config["brokers"].([]interface{})
	if !ok || len(rawBrokers) == 0 {
		return fmt.Errorf("brokers is required")
	}
	c.brokers = make([]string, len(rawBrokers))
	for i, broker := range rawBrokers {
		brokerStr, ok := broker.(string)
		if !ok {
			return fmt.Errorf("broker must be a string")
		}
		c.brokers[i] = brokerStr
	}

	// JSON decoding produces float64 for numbers; Go-literal configs use int.
	// The original only accepted float64, silently ignoring int values.
	switch batchSize := config["batch_size"].(type) {
	case float64:
		c.batchSize = int(batchSize)
	case int:
		c.batchSize = batchSize
	}

	// Seed metrics so later type-asserted increments never hit a nil entry.
	c.metrics["records_total"] = 0
	c.metrics["start_time"] = time.Now().Unix()

	return nil
}

// Start creates the synchronous Kafka producer. The producer waits for
// acknowledgement from all in-sync replicas and retries failed sends up
// to 5 times. Returns an error if the connector is already started or the
// producer cannot be created.
func (c *KafkaSinkConnector) Start(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.started {
		return fmt.Errorf("connector already started")
	}

	cfg := sarama.NewConfig()
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Retry.Max = 5
	cfg.Producer.Return.Successes = true

	producer, err := sarama.NewSyncProducer(c.brokers, cfg)
	if err != nil {
		return fmt.Errorf("failed to create producer: %w", err)
	}

	c.producer = producer
	c.started = true
	return nil
}

// Stop flushes any buffered messages and closes the producer. The producer
// is closed even when the flush fails — the original returned early on a
// flush error and leaked the producer permanently. If the producer closes
// successfully the connector is marked stopped; any flush error is still
// reported to the caller.
func (c *KafkaSinkConnector) Stop(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.started {
		return fmt.Errorf("connector not started")
	}

	// Best-effort flush; remember the error but keep going so the
	// producer is never leaked.
	flushErr := c.flushBuffer()

	if err := c.producer.Close(); err != nil {
		if flushErr != nil {
			return fmt.Errorf("failed to flush buffer (%v) and close producer: %w", flushErr, err)
		}
		return fmt.Errorf("failed to close producer: %w", err)
	}

	c.started = false

	if flushErr != nil {
		return fmt.Errorf("failed to flush buffer: %w", flushErr)
	}
	return nil
}

// Write converts the batch's records into producer messages and appends
// them to the internal buffer, flushing synchronously once the buffer
// reaches batchSize. A nil or empty batch is a no-op. Returns an error if
// the connector is not started or a triggered flush fails.
func (c *KafkaSinkConnector) Write(ctx context.Context, batch *RecordBatch) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.started {
		return fmt.Errorf("connector not started")
	}

	if batch == nil || len(batch.Records) == 0 {
		return nil
	}

	for _, record := range batch.Records {
		c.buffer = append(c.buffer, c.toProducerMessage(record))
	}

	// Flush eagerly once the threshold is reached; smaller buffers wait
	// for the next Write or an explicit Flush.
	if len(c.buffer) >= c.batchSize {
		if err := c.flushBuffer(); err != nil {
			return fmt.Errorf("failed to flush buffer: %w", err)
		}
	}

	c.metrics["records_total"] = c.metrics["records_total"].(int) + len(batch.Records)
	c.metrics["last_write_time"] = time.Now().Unix()

	return nil
}

// toProducerMessage converts a single record into a sarama producer
// message targeting the connector's configured topic, copying over the
// key, value, timestamp, and any headers.
func (c *KafkaSinkConnector) toProducerMessage(record *Record) *sarama.ProducerMessage {
	message := &sarama.ProducerMessage{
		Topic:     c.topic,
		Key:       sarama.ByteEncoder(record.Key),
		Value:     sarama.ByteEncoder(record.Value),
		Timestamp: record.Timestamp,
	}

	if len(record.Headers) > 0 {
		headers := make([]sarama.RecordHeader, 0, len(record.Headers))
		for k, v := range record.Headers {
			headers = append(headers, sarama.RecordHeader{
				Key:   []byte(k),
				Value: []byte(v),
			})
		}
		message.Headers = headers
	}

	return message
}

// Flush forces any buffered records to be sent to Kafka immediately.
// Returns an error if the connector is not started or the send fails.
func (c *KafkaSinkConnector) Flush(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.started {
		return fmt.Errorf("connector not started")
	}

	if err := c.flushBuffer(); err != nil {
		return fmt.Errorf("failed to flush buffer: %w", err)
	}
	return nil
}

// GetMetrics returns a point-in-time snapshot of the connector metrics.
// A copy is returned so callers cannot mutate the connector's internal map.
func (c *KafkaSinkConnector) GetMetrics() map[string]interface{} {
	c.mu.RLock()
	defer c.mu.RUnlock()

	snapshot := make(map[string]interface{}, len(c.metrics))
	for key, value := range c.metrics {
		snapshot[key] = value
	}
	return snapshot
}

// flushBuffer synchronously sends every buffered message and resets the
// buffer on success. On failure the buffer is left intact so a later
// flush can retry the same messages. The caller must hold c.mu.
func (c *KafkaSinkConnector) flushBuffer() error {
	if len(c.buffer) == 0 {
		return nil
	}

	if err := c.producer.SendMessages(c.buffer); err != nil {
		return fmt.Errorf("failed to send messages: %w", err)
	}

	c.buffer = make([]*sarama.ProducerMessage, 0)
	return nil
}
