package kafka

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/IBM/sarama"
)

// Client represents a Kafka client with producer and consumer capabilities.
// It bundles a synchronous producer, a partition-level consumer, and a
// cluster admin client that all connect to the same broker set. Construct
// it with NewClient; release all underlying connections with Close.
type Client struct {
	config   *Config             // configuration supplied to NewClient
	producer sarama.SyncProducer // blocking producer (waits for broker acks)
	consumer sarama.Consumer     // standalone consumer (not a consumer group)
	admin    sarama.ClusterAdmin // topic/cluster management; used by HealthCheck
}

// Config holds Kafka configuration shared by the producer, consumer, and
// admin sub-clients. Brokers, Topic, and ConsumerGroup are required (see
// validateConfig); the three sub-configs are optional and, when nil, the
// built-in defaults of the corresponding init function are used.
type Config struct {
	Brokers        []string        // broker addresses, host:port; at least one required
	Topic          string          // default topic; required
	ConsumerGroup  string          // consumer group id; required (NOTE(review): not referenced by the init code visible here — verify it is consumed elsewhere)
	ProducerConfig *ProducerConfig // optional producer overrides
	ConsumerConfig *ConsumerConfig // optional consumer overrides
	AdminConfig    *AdminConfig    // optional admin overrides
}

// ProducerConfig holds producer-specific configuration. Zero values mean
// "keep the default" (see initProducer), so sarama.NoResponse acks and
// sarama.CompressionNone cannot be selected explicitly through this struct.
type ProducerConfig struct {
	RequiredAcks sarama.RequiredAcks     // broker ack level; default sarama.WaitForAll
	Timeout      time.Duration           // produce timeout; default 10s
	RetryMax     int                     // max send retries; default 3
	Compression  sarama.CompressionCodec // message compression; default snappy
}

// ConsumerConfig holds consumer-specific configuration. Zero values mean
// "keep the default" (see initConsumer).
type ConsumerConfig struct {
	AutoOffsetReset   string        // "earliest" or "latest"; any other value keeps the default (earliest)
	SessionTimeout    time.Duration // consumer-group session timeout
	HeartbeatInterval time.Duration // consumer-group heartbeat interval
	MaxWaitTime       time.Duration // max broker wait for fetch responses
}

// AdminConfig holds admin-specific configuration.
type AdminConfig struct {
	Timeout time.Duration // applied to dial, read, and write network timeouts when > 0
}

// NewClient creates a new Kafka client, validating config and initializing
// the producer, consumer, and admin sub-clients in that order.
//
// On any initialization failure the connections already established by
// earlier steps are closed before returning, so a nil, error result never
// leaks broker connections. The returned error wraps the underlying cause.
func NewClient(config *Config) (*Client, error) {
	if err := validateConfig(config); err != nil {
		return nil, fmt.Errorf("invalid config: %w", err)
	}

	client := &Client{
		config: config,
	}

	if err := client.initProducer(); err != nil {
		return nil, fmt.Errorf("failed to initialize producer: %w", err)
	}

	if err := client.initConsumer(); err != nil {
		// Best-effort cleanup of the producer created above.
		_ = client.Close()
		return nil, fmt.Errorf("failed to initialize consumer: %w", err)
	}

	if err := client.initAdmin(); err != nil {
		// Best-effort cleanup of the producer and consumer created above.
		_ = client.Close()
		return nil, fmt.Errorf("failed to initialize admin client: %w", err)
	}

	return client, nil
}

// validateConfig validates the Kafka configuration, ensuring the required
// fields (Brokers, Topic, ConsumerGroup) are present. A nil config is
// rejected rather than dereferenced. Returns nil when the config is usable.
func validateConfig(config *Config) error {
	if config == nil {
		return errors.New("config must not be nil")
	}
	if len(config.Brokers) == 0 {
		return errors.New("at least one broker must be specified")
	}
	if config.Topic == "" {
		return errors.New("topic must be specified")
	}
	if config.ConsumerGroup == "" {
		return errors.New("consumer group must be specified")
	}
	return nil
}

// initProducer builds a sarama synchronous producer from c.config.Brokers
// and stores it on the client. Defaults favor durability: full-ISR acks,
// three retries, a 10s timeout, and snappy compression. Non-zero fields of
// c.config.ProducerConfig override the corresponding default.
//
// NOTE(review): a zero field means "keep the default", so sarama.NoResponse
// acks and sarama.CompressionNone cannot be requested via ProducerConfig.
func (c *Client) initProducer() error {
	cfg := sarama.NewConfig()

	// Durable defaults.
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Retry.Max = 3
	cfg.Producer.Return.Successes = true // mandatory for SyncProducer
	cfg.Producer.Timeout = 10 * time.Second
	cfg.Producer.Compression = sarama.CompressionSnappy

	// Apply caller overrides, if any.
	if pc := c.config.ProducerConfig; pc != nil {
		if pc.RequiredAcks != 0 {
			cfg.Producer.RequiredAcks = pc.RequiredAcks
		}
		if pc.Timeout > 0 {
			cfg.Producer.Timeout = pc.Timeout
		}
		if pc.RetryMax > 0 {
			cfg.Producer.Retry.Max = pc.RetryMax
		}
		if pc.Compression != 0 {
			cfg.Producer.Compression = pc.Compression
		}
	}

	p, err := sarama.NewSyncProducer(c.config.Brokers, cfg)
	if err != nil {
		return fmt.Errorf("failed to create producer: %w", err)
	}
	c.producer = p
	return nil
}

// initConsumer builds a standalone sarama consumer from c.config.Brokers
// and stores it on the client. Defaults: start from the oldest offset with
// auto-commit every second. Non-zero fields of c.config.ConsumerConfig
// override the corresponding default; AutoOffsetReset accepts "earliest"
// or "latest" (any other value keeps the default).
//
// NOTE(review): the group session/heartbeat/rebalance settings configured
// here only take effect for consumer groups, but sarama.NewConsumer creates
// a partition-level consumer — confirm whether a ConsumerGroup is built
// from this config elsewhere.
func (c *Client) initConsumer() error {
	cfg := sarama.NewConfig()

	// Defaults.
	cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
	cfg.Consumer.Offsets.Initial = sarama.OffsetOldest
	cfg.Consumer.Offsets.AutoCommit.Enable = true
	cfg.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second

	// Apply caller overrides, if any.
	if cc := c.config.ConsumerConfig; cc != nil {
		switch cc.AutoOffsetReset {
		case "earliest":
			cfg.Consumer.Offsets.Initial = sarama.OffsetOldest
		case "latest":
			cfg.Consumer.Offsets.Initial = sarama.OffsetNewest
		}
		if cc.SessionTimeout > 0 {
			cfg.Consumer.Group.Session.Timeout = cc.SessionTimeout
		}
		if cc.HeartbeatInterval > 0 {
			cfg.Consumer.Group.Heartbeat.Interval = cc.HeartbeatInterval
		}
		if cc.MaxWaitTime > 0 {
			cfg.Consumer.MaxWaitTime = cc.MaxWaitTime
		}
	}

	cons, err := sarama.NewConsumer(c.config.Brokers, cfg)
	if err != nil {
		return fmt.Errorf("failed to create consumer: %w", err)
	}
	c.consumer = cons
	return nil
}

// initAdmin builds a sarama cluster-admin client from c.config.Brokers and
// stores it on the client. The protocol version defaults to Kafka 2.8.0.
// When AdminConfig.Timeout is positive it is applied uniformly to the dial,
// read, and write network timeouts.
func (c *Client) initAdmin() error {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_8_0_0

	if ac := c.config.AdminConfig; ac != nil && ac.Timeout > 0 {
		cfg.Net.DialTimeout = ac.Timeout
		cfg.Net.ReadTimeout = ac.Timeout
		cfg.Net.WriteTimeout = ac.Timeout
	}

	admin, err := sarama.NewClusterAdmin(c.config.Brokers, cfg)
	if err != nil {
		return fmt.Errorf("failed to create admin client: %w", err)
	}
	c.admin = admin
	return nil
}

// Close closes the Kafka client and all its connections (producer, consumer,
// and admin, each only if initialized). All close attempts are made even if
// earlier ones fail; the individual failures are combined with errors.Join,
// which returns nil when everything closed cleanly and otherwise preserves
// each wrapped cause for errors.Is/errors.As inspection.
func (c *Client) Close() error {
	// Note: the local slice is named errs (not "errors") to avoid shadowing
	// the errors package.
	var errs []error

	if c.producer != nil {
		if err := c.producer.Close(); err != nil {
			errs = append(errs, fmt.Errorf("failed to close producer: %w", err))
		}
	}

	if c.consumer != nil {
		if err := c.consumer.Close(); err != nil {
			errs = append(errs, fmt.Errorf("failed to close consumer: %w", err))
		}
	}

	if c.admin != nil {
		if err := c.admin.Close(); err != nil {
			errs = append(errs, fmt.Errorf("failed to close admin client: %w", err))
		}
	}

	return errors.Join(errs...)
}

// HealthCheck performs a basic connectivity check against the Kafka cluster
// by listing topics through the admin client.
//
// The context is checked for cancellation/expiry before the request is
// issued; note that sarama's ListTopics does not itself accept a context,
// so an in-flight request is bounded only by the admin client's configured
// network timeouts, not by ctx.
func (c *Client) HealthCheck(ctx context.Context) error {
	if c.admin == nil {
		return fmt.Errorf("admin client not initialized")
	}

	if err := ctx.Err(); err != nil {
		return fmt.Errorf("health check aborted: %w", err)
	}

	// Listing topics is a lightweight round trip that proves broker
	// connectivity and metadata access.
	if _, err := c.admin.ListTopics(); err != nil {
		return fmt.Errorf("health check failed: %w", err)
	}

	return nil
}

// GetProducer returns the Kafka producer (nil until NewClient succeeds).
// NOTE(review): Go convention favors Producer() over GetProducer(); the
// name is kept to preserve the existing public API.
func (c *Client) GetProducer() sarama.SyncProducer {
	return c.producer
}

// GetConsumer returns the Kafka consumer (nil until NewClient succeeds).
// NOTE(review): Go convention favors Consumer() over GetConsumer(); the
// name is kept to preserve the existing public API.
func (c *Client) GetConsumer() sarama.Consumer {
	return c.consumer
}

// GetAdmin returns the Kafka admin client (nil until NewClient succeeds).
// NOTE(review): Go convention favors Admin() over GetAdmin(); the name is
// kept to preserve the existing public API.
func (c *Client) GetAdmin() sarama.ClusterAdmin {
	return c.admin
}

// GetConfig returns the Kafka configuration passed to NewClient. The
// pointer is shared, not copied; callers must not mutate it after the
// client is constructed.
func (c *Client) GetConfig() *Config {
	return c.config
}
