package kafka

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"time"

	"github.com/IBM/sarama"
)

// Message represents a single record consumed from Kafka together with
// its delivery metadata, decoupled from the sarama message type.
type Message struct {
	// Topic is the topic the record was read from.
	Topic     string
	// Partition is the partition within Topic that held the record.
	Partition int32
	// Offset is the record's offset within Partition.
	Offset    int64
	// Key is the record's optional key (may be nil).
	Key       []byte
	// Value is the record payload.
	Value     []byte
	// Timestamp is the record timestamp as reported by sarama.
	Timestamp time.Time
	// Headers are the record headers, copied from sarama by value.
	Headers   []sarama.RecordHeader
}

// JobMessage is the JSON wire format for a job carried in a Kafka
// message value. See SerializeJobMessage / DeserializeJobMessage.
type JobMessage struct {
	// JobID uniquely identifies the job; required (see ValidateJobMessage).
	JobID      string                 `json:"job_id"`
	// JobType names the kind of work to perform; required.
	JobType    string                 `json:"job_type"`
	// ClusterID identifies the target cluster; required.
	ClusterID  string                 `json:"cluster_id"`
	// Parameters carries free-form job arguments.
	Parameters map[string]interface{} `json:"parameters"`
	// Priority must be in [0, 2] per ValidateJobMessage.
	Priority   int                    `json:"priority"`
	// CreatedAt is when the message was created (reset on retry).
	CreatedAt  time.Time              `json:"created_at"`
	// RetryCount is how many times this job has been retried.
	RetryCount int                    `json:"retry_count"`
}

// Reader consumes messages from Kafka via a sarama consumer group and
// dispatches them to per-topic handlers. It implements
// sarama.ConsumerGroupHandler (Setup/Cleanup/ConsumeClaim).
type Reader struct {
	// client supplies broker, group, and topic configuration.
	client    *Client
	// consumer is the underlying sarama consumer group.
	consumer  sarama.ConsumerGroup
	// handlers maps topic name -> handler; written by RegisterHandler,
	// read (unlocked) by ConsumeClaim — register before Start.
	handlers  map[string]MessageHandler
	// errorChan buffers consume-loop errors for the caller (see Errors).
	errorChan chan error
	// doneChan is closed by Stop to end the consume loop.
	doneChan  chan struct{}
}

// MessageHandler processes a single consumed message. Returning a
// non-nil error leaves the message's offset unmarked, so it will be
// redelivered after a rebalance or restart.
type MessageHandler func(ctx context.Context, msg *Message) error

// NewReader creates a Kafka reader bound to the client's brokers,
// consumer group, and topic. The consumer group is configured with
// round-robin partition assignment, oldest-offset start, and offset
// auto-commit every second; any positive values in
// client.config.ConsumerConfig override the session timeout, heartbeat
// interval, and max wait time.
func NewReader(client *Client) (*Reader, error) {
	if client == nil {
		return nil, fmt.Errorf("client cannot be nil")
	}

	cfg := sarama.NewConfig()
	cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
	cfg.Consumer.Offsets.Initial = sarama.OffsetOldest
	cfg.Consumer.Offsets.AutoCommit.Enable = true
	cfg.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second

	// Apply per-client overrides, treating non-positive values as "unset".
	if cc := client.config.ConsumerConfig; cc != nil {
		if cc.SessionTimeout > 0 {
			cfg.Consumer.Group.Session.Timeout = cc.SessionTimeout
		}
		if cc.HeartbeatInterval > 0 {
			cfg.Consumer.Group.Heartbeat.Interval = cc.HeartbeatInterval
		}
		if cc.MaxWaitTime > 0 {
			cfg.Consumer.MaxWaitTime = cc.MaxWaitTime
		}
	}

	group, err := sarama.NewConsumerGroup(client.config.Brokers, client.config.ConsumerGroup, cfg)
	if err != nil {
		return nil, fmt.Errorf("failed to create consumer group: %w", err)
	}

	r := &Reader{
		client:    client,
		consumer:  group,
		handlers:  make(map[string]MessageHandler),
		errorChan: make(chan error, 100),
		doneChan:  make(chan struct{}),
	}
	return r, nil
}

// RegisterHandler registers a message handler for a specific topic.
// All handlers must be registered before Start is called: the handlers
// map is read by the consume loop without synchronization, so
// registering concurrently with consumption would be a data race.
func (r *Reader) RegisterHandler(topic string, handler MessageHandler) {
	r.handlers[topic] = handler
}

// Start launches a background goroutine that consumes the configured
// topic and dispatches messages to registered handlers. It returns
// immediately; runtime errors are reported on the channel returned by
// Errors. The goroutine exits when ctx is cancelled, Stop is called,
// or the consumer group is closed.
func (r *Reader) Start(ctx context.Context) error {
	topics := []string{r.client.config.Topic}

	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case <-r.doneChan:
				return
			default:
			}

			// Consume blocks for the duration of a consumer group
			// session and returns on rebalance or error; loop to
			// rejoin the group.
			if err := r.consumer.Consume(ctx, topics, r); err != nil {
				if errors.Is(err, sarama.ErrClosedConsumerGroup) {
					// Stop closed the consumer; shut down quietly
					// instead of reporting a spurious error.
					return
				}
				// Non-blocking send: a full (or never-drained) error
				// channel must not wedge the consume loop forever.
				select {
				case r.errorChan <- fmt.Errorf("error from consumer: %w", err):
				default:
					log.Printf("kafka reader: error channel full, dropping: %v", err)
				}
			}

			// When ctx is done, Consume returns immediately; exit here
			// rather than busy-spinning until the select above fires.
			if ctx.Err() != nil {
				return
			}
		}
	}()

	return nil
}

// Stop signals the consume loop to exit and closes the underlying
// consumer group, returning the close error. A repeated sequential
// Stop no longer panics on a double channel close; Stop is still not
// intended to be called from multiple goroutines concurrently.
func (r *Reader) Stop() error {
	select {
	case <-r.doneChan:
		// Already stopped; don't close the channel twice.
	default:
		close(r.doneChan)
	}
	return r.consumer.Close()
}

// Errors returns the channel on which the consume loop reports
// errors. Callers should drain it; the channel is buffered (100) and
// further errors are dropped once it is full.
func (r *Reader) Errors() <-chan error {
	return r.errorChan
}

// Setup is called by sarama when a new consumer group session is about
// to begin. No per-session initialization is needed.
func (r *Reader) Setup(sarama.ConsumerGroupSession) error {
	return nil
}

// Cleanup is called by sarama when a consumer group session has ended.
// No per-session teardown is needed.
func (r *Reader) Cleanup(sarama.ConsumerGroupSession) error {
	return nil
}

// ConsumeClaim processes messages from a single partition claim. It is
// invoked by sarama once per claimed partition for the lifetime of a
// consumer group session and returns when the claim's message channel
// closes or the session context ends. Successfully handled messages
// (and messages with no registered handler) are marked; handler errors
// leave the offset unmarked so the message is redelivered after a
// rebalance or restart.
func (r *Reader) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for {
		select {
		case msg, ok := <-claim.Messages():
			// The channel closes when the session ends or the
			// partition is rebalanced away.
			if !ok || msg == nil {
				return nil
			}

			// Convert the sarama message to our exported Message type.
			message := &Message{
				Topic:     msg.Topic,
				Partition: msg.Partition,
				Offset:    msg.Offset,
				Key:       msg.Key,
				Value:     msg.Value,
				Timestamp: msg.Timestamp,
				Headers:   convertHeaders(msg.Headers),
			}

			handler, exists := r.handlers[msg.Topic]
			if !exists {
				// Mark unhandled topics so they are not redelivered.
				log.Printf("No handler registered for topic: %s", msg.Topic)
				session.MarkMessage(msg, "")
				continue
			}

			// Use the session context (not context.Background()) so
			// handlers observe rebalance/shutdown cancellation.
			if err := handler(session.Context(), message); err != nil {
				log.Printf("Error processing message from topic %s: %v", msg.Topic, err)
				// Don't mark message as processed on error.
				continue
			}

			// Mark message as processed so its offset is committed.
			session.MarkMessage(msg, "")

		case <-session.Context().Done():
			return nil
		}
	}
}

// DeserializeJobMessage decodes the JSON payload of msg into a
// JobMessage. It returns an error when msg is nil or its Value is not
// valid JSON for the JobMessage schema.
func DeserializeJobMessage(msg *Message) (*JobMessage, error) {
	if msg == nil {
		return nil, fmt.Errorf("message cannot be nil")
	}

	jobMsg := &JobMessage{}
	if err := json.Unmarshal(msg.Value, jobMsg); err != nil {
		return nil, fmt.Errorf("failed to deserialize job message: %w", err)
	}
	return jobMsg, nil
}

// SerializeJobMessage encodes jobMsg as JSON suitable for use as a
// Kafka message value. It returns an error when jobMsg is nil or
// cannot be marshalled.
func SerializeJobMessage(jobMsg *JobMessage) ([]byte, error) {
	if jobMsg == nil {
		return nil, fmt.Errorf("job message cannot be nil")
	}

	payload, err := json.Marshal(jobMsg)
	if err != nil {
		return nil, fmt.Errorf("failed to serialize job message: %w", err)
	}
	return payload, nil
}

// ValidateJobMessage checks that jobMsg is non-nil, carries the
// required job_id, job_type, and cluster_id fields, and has a priority
// in the range [0, 2]. The first violation found is returned.
func ValidateJobMessage(jobMsg *JobMessage) error {
	switch {
	case jobMsg == nil:
		return fmt.Errorf("job message cannot be nil")
	case jobMsg.JobID == "":
		return fmt.Errorf("job_id is required")
	case jobMsg.JobType == "":
		return fmt.Errorf("job_type is required")
	case jobMsg.ClusterID == "":
		return fmt.Errorf("cluster_id is required")
	case jobMsg.Priority < 0 || jobMsg.Priority > 2:
		return fmt.Errorf("priority must be between 0 and 2")
	default:
		return nil
	}
}

// RetryMessage creates a copy of jobMsg suitable for re-enqueueing:
// RetryCount is incremented and CreatedAt is reset to now. It returns
// an error when jobMsg is nil or its retry budget (maxRetries) is
// already exhausted.
//
// The Parameters map is cloned so the retry message does not alias the
// original's map — a plain struct copy would share the underlying map,
// and a mutation through one message would silently affect the other.
func RetryMessage(jobMsg *JobMessage, maxRetries int) (*JobMessage, error) {
	if jobMsg == nil {
		return nil, fmt.Errorf("job message cannot be nil")
	}

	if jobMsg.RetryCount >= maxRetries {
		return nil, fmt.Errorf("max retries exceeded")
	}

	retryMsg := *jobMsg
	retryMsg.RetryCount++
	retryMsg.CreatedAt = time.Now()

	if jobMsg.Parameters != nil {
		params := make(map[string]interface{}, len(jobMsg.Parameters))
		for k, v := range jobMsg.Parameters {
			params[k] = v
		}
		retryMsg.Parameters = params
	}

	return &retryMsg, nil
}

// convertHeaders dereferences a slice of *sarama.RecordHeader into a
// slice of sarama.RecordHeader values. A nil input yields nil.
func convertHeaders(headers []*sarama.RecordHeader) []sarama.RecordHeader {
	if headers == nil {
		return nil
	}

	out := make([]sarama.RecordHeader, 0, len(headers))
	for _, h := range headers {
		out = append(out, *h)
	}
	return out
}
