package archive

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"sync"
	"time"

	"github.com/Shopify/sarama"
	"gitlab.com/gitlab-org/gitaly/v14/internal/git/repository"
	"gitlab.com/gitlab-org/gitaly/v14/internal/gitaly/archive"
)

// KafkaProducerConfig contains Kafka producer configuration
type KafkaProducerConfig struct {
	Brokers []string // Kafka broker addresses; must be non-empty when Enabled is true
	Topic   string   // Topic to send messages to; must be non-empty when Enabled is true
	Enabled bool     // Whether Kafka producer is enabled; when false, sends become no-ops
}

// TaskMessage is the message payload published to Kafka. It carries the
// task ID, the repository location (path and storage), the send time, and
// the name of the metadata file associated with the task.
type TaskMessage struct {
	TaskID           string    `json:"task_id"`            // unique identifier of the archive task
	RepositoryPath   string    `json:"repository_path"`    // repository path within the storage
	StorageName      string    `json:"storage_name"`       // gitaly storage the repository lives on
	Timestamp        time.Time `json:"timestamp"`          // time the message was created
	MetadataFileName string    `json:"metadata_file_name"` // name of the refs-list metadata file
}

// KafkaProducer handles Kafka message production for archive tasks
type KafkaProducer struct {
	config   KafkaProducerConfig // configuration captured at construction time
	producer sarama.SyncProducer // nil when disabled or after Close
	enabled  bool                // snapshot of config.Enabled; intended to be guarded by mu
	mu       sync.RWMutex        // guards producer and enabled
}

// NewKafkaProducer creates a new Kafka producer for the given configuration.
// When config.Enabled is false the returned producer is a no-op and no
// connection to Kafka is attempted. When enabled, Brokers and Topic must be
// non-empty and the synchronous producer is established eagerly, returning
// an error if the connection cannot be made.
func NewKafkaProducer(config KafkaProducerConfig) (*KafkaProducer, error) {
	kp := &KafkaProducer{config: config, enabled: config.Enabled}

	// Disabled producers short-circuit: no validation, no connection.
	if !kp.enabled {
		log.Println("Kafka producer is disabled")
		return kp, nil
	}

	// Config is only validated when the producer is actually enabled.
	switch {
	case len(config.Brokers) == 0:
		return nil, fmt.Errorf("kafka brokers cannot be empty when enabled")
	case config.Topic == "":
		return nil, fmt.Errorf("kafka topic cannot be empty when enabled")
	}

	// Producer settings: wait for all in-sync replicas, bounded retries,
	// snappy compression, and hash partitioning so identical keys always
	// route to the same partition.
	sc := sarama.NewConfig()
	sc.Producer.RequiredAcks = sarama.WaitForAll
	sc.Producer.Retry.Max = 3
	sc.Producer.Return.Successes = true
	sc.Producer.Return.Errors = true
	sc.Producer.Compression = sarama.CompressionSnappy
	sc.Producer.Partitioner = sarama.NewHashPartitioner

	p, err := sarama.NewSyncProducer(config.Brokers, sc)
	if err != nil {
		return nil, fmt.Errorf("failed to create Kafka producer: %w", err)
	}
	kp.producer = p

	log.Printf("Kafka producer initialized for topic: %s, brokers: %v", config.Topic, config.Brokers)
	return kp, nil
}

// IsEnabled reports whether this producer will actually publish messages.
func (kp *KafkaProducer) IsEnabled() bool {
	kp.mu.RLock()
	enabled := kp.enabled
	kp.mu.RUnlock()
	return enabled
}

// SendTaskMessage sends a task message with repository info and metadata
// filename to Kafka. It is a silent no-op (returning nil) when the producer
// is disabled. It returns an error when the producer was never initialized,
// when ctx is already cancelled, or when the synchronous send fails.
func (kp *KafkaProducer) SendTaskMessage(ctx context.Context, taskID, repositoryPath, storageName, metadataFileName string) error {
	kp.mu.RLock()
	defer kp.mu.RUnlock()

	// Read enabled while holding the lock: the previous unlocked read raced
	// with concurrent state access (IsEnabled/Close take the same mutex).
	if !kp.enabled {
		log.Printf("Kafka producer is disabled, skipping task %s", taskID)
		return nil
	}

	if kp.producer == nil {
		return fmt.Errorf("kafka producer is not initialized")
	}

	// sarama's SyncProducer does not accept a context, so honor cancellation
	// explicitly before starting the blocking send.
	if err := ctx.Err(); err != nil {
		return fmt.Errorf("context done before sending task %s: %w", taskID, err)
	}

	// Use a single timestamp for both the JSON payload and the message header;
	// the original called time.Now() twice, so the two values could differ.
	now := time.Now()

	// Create message with repository info and metadata filename
	taskMessage := TaskMessage{
		TaskID:           taskID,
		RepositoryPath:   repositoryPath,
		StorageName:      storageName,
		Timestamp:        now,
		MetadataFileName: metadataFileName,
	}

	// Serialize message
	messageBytes, err := json.Marshal(taskMessage)
	if err != nil {
		return fmt.Errorf("failed to marshal task message: %w", err)
	}

	// Create Kafka message. taskID as the key plus the hash partitioner means
	// all messages for one task land on the same partition (per-task ordering).
	msg := &sarama.ProducerMessage{
		Topic: kp.config.Topic,
		Key:   sarama.StringEncoder(taskID),
		Value: sarama.ByteEncoder(messageBytes),
		Headers: []sarama.RecordHeader{
			{
				Key:   []byte("task_id"),
				Value: []byte(taskID),
			},
			{
				Key:   []byte("repository_path"),
				Value: []byte(repositoryPath),
			},
			{
				Key:   []byte("storage_name"),
				Value: []byte(storageName),
			},
			{
				Key:   []byte("timestamp"),
				Value: []byte(now.Format(time.RFC3339)),
			},
		},
	}

	// Send message synchronously; partition and offset are reported on success.
	partition, offset, err := kp.producer.SendMessage(msg)
	if err != nil {
		return fmt.Errorf("failed to send message to Kafka: %w", err)
	}

	log.Printf("Archive task %s sent to Kafka successfully (partition: %d, offset: %d, repo: %s, file: %s)",
		taskID, partition, offset, repositoryPath, metadataFileName)

	return nil
}

// SendArchiveTask sends an archive task message (legacy method for compatibility).
// It derives the metadata file name from the current Unix time and the task ID,
// then delegates to SendTaskMessage.
func (kp *KafkaProducer) SendArchiveTask(ctx context.Context, taskMessage ArchiveTaskMessage) error {
	// Build the metadata file name: refs-list-<unix>-<taskID>-archive.json.
	metadataFileName := fmt.Sprintf("refs-list-%d-%s-archive.json", time.Now().Unix(), taskMessage.TaskID)
	// The repository path was previously sent as "", producing messages that
	// consumers could not map back to a repository; include the relative path.
	return kp.SendTaskMessage(ctx, taskMessage.TaskID, taskMessage.Repository.GetRelativePath(), taskMessage.Repository.GetStorageName(), metadataFileName)
}

// SendExpiredRefTask sends a task for expired refs found during housekeeping.
// It is a no-op when entries is empty. The task ID and metadata file name are
// derived from a single timestamp so they cannot disagree.
func (kp *KafkaProducer) SendExpiredRefTask(ctx context.Context, repo repository.GitRepo, entries []archive.MetadataEntry, taskIDPrefix string) error {
	if len(entries) == 0 {
		return nil
	}

	// Capture the timestamp once: the original called time.Now().Unix() twice,
	// which could yield a file name embedding a different second than the task ID.
	now := time.Now().Unix()
	taskID := fmt.Sprintf("%s-%d", taskIDPrefix, now)
	metadataFileName := fmt.Sprintf("refs-list-%d-%s-archive.json", now, taskID)
	// Include the repository's relative path (was previously sent as "").
	return kp.SendTaskMessage(ctx, taskID, repo.GetRelativePath(), repo.GetStorageName(), metadataFileName)
}

// Close shuts down the underlying sarama producer, if one was created.
// It is safe to call on a disabled or already-closed producer.
func (kp *KafkaProducer) Close() error {
	kp.mu.Lock()
	defer kp.mu.Unlock()

	// Nothing to do when disabled or already closed.
	if kp.producer == nil {
		return nil
	}

	if err := kp.producer.Close(); err != nil {
		return fmt.Errorf("failed to close Kafka producer: %w", err)
	}
	kp.producer = nil
	log.Println("Kafka producer closed")
	return nil
}

// Legacy ArchiveTaskMessage for backward compatibility
//
// NOTE(review): Repository is an interface-typed field. json.Marshal will
// serialize whatever concrete type it holds, but json.Unmarshal into this
// struct will fail for that field — confirm whether this type is ever decoded.
type ArchiveTaskMessage struct {
	TaskID     string                `json:"task_id"`
	Repository repository.GitRepo    `json:"repository"`
	Entries    []archive.MetadataEntry `json:"entries"`
	Timestamp  time.Time             `json:"timestamp"`
	CreatedBy  string                `json:"created_by"`
	Priority   int                   `json:"priority"`
}
