package archive

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/Shopify/sarama"
	"gitlab.com/gitlab-org/gitaly/v14/internal/git"
	"gitlab.com/gitlab-org/gitaly/v14/internal/git/localrepo"
	"gitlab.com/gitlab-org/gitaly/v14/internal/git/repository"
	"gitlab.com/gitlab-org/gitaly/v14/internal/gitaly/archive"
	"gitlab.com/gitlab-org/gitaly/v14/internal/gitaly/config"
	"gitlab.com/gitlab-org/gitaly/v14/internal/storage"
)

// KafkaConfig contains Kafka consumer configuration.
type KafkaConfig struct {
	Brokers       []string      // Kafka broker addresses (required)
	Topic         string        // Topic to consume from (required)
	GroupID       string        // Consumer group ID; defaults to "gitaly-archive-consumer" when empty
	RetryAttempts int           // Maximum retry attempts for failed messages; defaults to 3 when unset
	RetryDelay    time.Duration // Delay between retries; defaults to 30s when unset
}

// TaskMessage is the Kafka message format: it carries the task ID, the
// repository location, a timestamp, and the name of the metadata file that
// describes the refs to process.
type TaskMessage struct {
	TaskID           string    `json:"task_id"`
	RepositoryPath   string    `json:"repository_path"`
	StorageName      string    `json:"storage_name"`
	Timestamp        time.Time `json:"timestamp"`
	MetadataFileName string    `json:"metadata_file_name"`
}

// KafkaConsumer handles Kafka message consumption for archive tasks.
// It implements sarama.ConsumerGroupHandler (Setup/Cleanup/ConsumeClaim).
type KafkaConsumer struct {
	cfg            config.Cfg              // Gitaly configuration (provides storage paths)
	kafkaConfig    KafkaConfig             // consumer settings (brokers, topic, retries)
	locator        storage.Locator         // repository path resolution
	gitCmdFactory  git.CommandFactory      // factory for spawning git commands
	storageManager *archive.StorageManager // reads/writes/moves metadata files
	gitOperations  *archive.GitOperations  // performs the actual ref archive/restore/delete
	lockManager    *archive.LockManager    // serializes processing per task

	consumerGroup  sarama.ConsumerGroup // live sarama consumer group; nil until Start
	running        bool                 // guarded by mu
	ctx            context.Context      // cancelled by Stop
	cancel         context.CancelFunc
	wg             sync.WaitGroup // joins the consume and handleErrors goroutines
	mu             sync.RWMutex   // protects running and lifecycle fields
}

// MaxBatchSize is the maximum number of references processed per batch.
const MaxBatchSize = 100

// NewKafkaConsumer creates a new Kafka consumer.
//
// Brokers and Topic are required. GroupID, RetryAttempts and RetryDelay
// fall back to defaults when unset; non-positive retry settings are also
// treated as unset so a negative value from malformed configuration cannot
// disable retries or produce a zero/negative delay.
func NewKafkaConsumer(
	cfg config.Cfg,
	kafkaConfig KafkaConfig,
	locator storage.Locator,
	gitCmdFactory git.CommandFactory,
	storageManager *archive.StorageManager,
	gitOperations *archive.GitOperations,
	lockManager *archive.LockManager,
) (*KafkaConsumer, error) {
	// Validate required settings.
	if len(kafkaConfig.Brokers) == 0 {
		return nil, fmt.Errorf("kafka brokers cannot be empty")
	}
	if kafkaConfig.Topic == "" {
		return nil, fmt.Errorf("kafka topic cannot be empty")
	}

	// Apply defaults for optional settings. Using <= 0 (rather than == 0)
	// also normalizes negative values.
	if kafkaConfig.GroupID == "" {
		kafkaConfig.GroupID = "gitaly-archive-consumer"
	}
	if kafkaConfig.RetryAttempts <= 0 {
		kafkaConfig.RetryAttempts = 3
	}
	if kafkaConfig.RetryDelay <= 0 {
		kafkaConfig.RetryDelay = 30 * time.Second
	}

	return &KafkaConsumer{
		cfg:            cfg,
		kafkaConfig:    kafkaConfig,
		locator:        locator,
		gitCmdFactory:  gitCmdFactory,
		storageManager: storageManager,
		gitOperations:  gitOperations,
		lockManager:    lockManager,
	}, nil
}

// Start starts the Kafka consumer.
//
// It builds the sarama consumer-group client, spawns the consume and
// error-draining goroutines, and marks the consumer as running. Calling
// Start on an already-running consumer returns an error.
func (kc *KafkaConsumer) Start(ctx context.Context) error {
	kc.mu.Lock()
	defer kc.mu.Unlock()

	if kc.running {
		return fmt.Errorf("kafka consumer is already running")
	}

	// Build the sarama client configuration. Offsets are committed
	// manually (see ConsumeClaim) so a message is only acknowledged after
	// successful processing.
	saramaCfg := sarama.NewConfig()
	saramaCfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
	saramaCfg.Consumer.Offsets.Initial = sarama.OffsetOldest
	saramaCfg.Consumer.Group.Session.Timeout = 30 * time.Second
	saramaCfg.Consumer.Group.Heartbeat.Interval = 3 * time.Second
	saramaCfg.Consumer.Return.Errors = true
	saramaCfg.Consumer.Offsets.AutoCommit.Enable = false // Manual commit for better control

	group, err := sarama.NewConsumerGroup(kc.kafkaConfig.Brokers, kc.kafkaConfig.GroupID, saramaCfg)
	if err != nil {
		return fmt.Errorf("failed to create Kafka consumer group: %w", err)
	}

	kc.consumerGroup = group
	kc.ctx, kc.cancel = context.WithCancel(ctx)
	kc.running = true

	// Consumption and error draining each run in their own goroutine;
	// both are joined via kc.wg in Stop.
	kc.wg.Add(2)
	go kc.consume()
	go kc.handleErrors()

	log.Printf("Kafka consumer started for topic: %s, group: %s", kc.kafkaConfig.Topic, kc.kafkaConfig.GroupID)
	return nil
}

// Stop stops the Kafka consumer.
//
// It cancels the consumer context, closes the consumer group so any
// blocked Consume call returns, and waits for the worker goroutines to
// exit. Stopping a consumer that is not running is a no-op.
//
// The mutex is released before waiting on the goroutines: previously
// wg.Wait() ran while holding kc.mu, which blocked every other method
// needing the lock (e.g. IsRunning) for the full duration of shutdown.
func (kc *KafkaConsumer) Stop() error {
	kc.mu.Lock()
	if !kc.running {
		kc.mu.Unlock()
		return nil
	}
	kc.running = false
	cancel := kc.cancel
	group := kc.consumerGroup
	kc.mu.Unlock()

	// Signal the goroutines to stop, then close the group so a blocked
	// Consume call returns promptly.
	cancel()
	if group != nil {
		if err := group.Close(); err != nil {
			log.Printf("Error closing Kafka consumer group: %v", err)
		}
	}

	// Join consume and handleErrors without holding the lock.
	kc.wg.Wait()
	log.Println("Kafka consumer stopped")
	return nil
}

// consume runs the consumer-group session loop until the consumer context
// is cancelled. Consume blocks for the duration of one session (until a
// rebalance or error) and must be re-invoked in a loop.
func (kc *KafkaConsumer) consume() {
	defer kc.wg.Done()

	for {
		err := kc.consumerGroup.Consume(kc.ctx, []string{kc.kafkaConfig.Topic}, kc)

		// During shutdown the context is cancelled and the group closed;
		// Consume then returns errors indefinitely. Exit quietly instead
		// of logging them as real failures.
		if kc.ctx.Err() != nil {
			return
		}

		if err != nil {
			log.Printf("Error consuming from Kafka: %v", err)
			// Back off before starting a new session.
			select {
			case <-kc.ctx.Done():
				return
			case <-time.After(5 * time.Second):
			}
		}
	}
}

// handleErrors drains the consumer group's error channel, logging each
// error, until the context is cancelled or the channel is closed.
func (kc *KafkaConsumer) handleErrors() {
	defer kc.wg.Done()

	for {
		select {
		case <-kc.ctx.Done():
			return
		case err, ok := <-kc.consumerGroup.Errors():
			// The channel is closed when the group shuts down; without
			// the ok check a closed channel yields nil forever and this
			// loop busy-spins until ctx.Done happens to be selected.
			if !ok {
				return
			}
			if err != nil {
				log.Printf("Kafka consumer error: %v", err)
			}
		}
	}
}

// Setup is called by sarama when a new consumer group session starts.
// No per-session initialization is required.
func (kc *KafkaConsumer) Setup(sarama.ConsumerGroupSession) error {
	return nil
}

// Cleanup is called by sarama when a consumer group session ends.
// No per-session teardown is required.
func (kc *KafkaConsumer) Cleanup(sarama.ConsumerGroupSession) error {
	return nil
}

// ConsumeClaim processes messages from a single partition claim. Per the
// sarama.ConsumerGroupHandler contract it must return once the claim's
// message channel is closed (rebalance or shutdown).
//
// Offsets are committed manually, and only after successful processing, so
// a failed message is redelivered on the next session.
func (kc *KafkaConsumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for {
		select {
		case <-session.Context().Done():
			return nil
		case message, ok := <-claim.Messages():
			// A closed channel marks the end of the claim. The previous
			// bare receive yielded nil forever here and the `continue`
			// busy-looped instead of returning.
			if !ok {
				return nil
			}

			// Process the message.
			if err := kc.processMessage(session.Context(), message); err != nil {
				log.Printf("Failed to process message: %v", err)
				// Leave the offset uncommitted so the message is retried.
				continue
			}

			// Mark and commit the offset only after successful processing.
			session.MarkMessage(message, "")
			if err := session.Commit(); err != nil {
				log.Printf("Failed to commit offset: %v", err)
			}
		}
	}
}

// processMessage decodes one Kafka message into a TaskMessage, validates
// it, and dispatches it for processing with retries.
func (kc *KafkaConsumer) processMessage(ctx context.Context, message *sarama.ConsumerMessage) error {
	var task TaskMessage
	if err := json.Unmarshal(message.Value, &task); err != nil {
		return fmt.Errorf("failed to unmarshal message: %w", err)
	}

	// The task ID is mandatory; everything else is optional context.
	if task.TaskID == "" {
		return fmt.Errorf("task_id is required")
	}

	log.Printf("Processing archive task: %s (repo: %s, file: %s)",
		task.TaskID, task.RepositoryPath, task.MetadataFileName)

	return kc.processTaskWithRetry(ctx, &task)
}

// processTaskWithRetry runs the task, retrying up to the configured number
// of attempts with a fixed delay between attempts. The delay honors
// context cancellation.
func (kc *KafkaConsumer) processTaskWithRetry(ctx context.Context, taskMessage *TaskMessage) error {
	maxAttempts := kc.kafkaConfig.RetryAttempts

	var lastErr error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		lastErr = kc.processTaskByTaskMessage(ctx, taskMessage)
		if lastErr == nil {
			log.Printf("Task %s completed successfully on attempt %d", taskMessage.TaskID, attempt)
			return nil
		}

		log.Printf("Task %s failed on attempt %d/%d: %v",
			taskMessage.TaskID, attempt, maxAttempts, lastErr)

		if attempt == maxAttempts {
			break
		}

		// Wait before the next attempt, aborting on cancellation.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(kc.kafkaConfig.RetryDelay):
		}
	}

	return fmt.Errorf("task %s failed after %d attempts: %w",
		taskMessage.TaskID, maxAttempts, lastErr)
}

// processTaskByTaskMessage processes the task described by a TaskMessage.
//
// It serializes work per task via the lock manager, locates the waiting
// metadata file referenced by the message, and processes its entries in
// batches. A task already being processed elsewhere is skipped without
// error. (Removed a duplicate len(entries) == 0 check that could never
// fire after the first one.)
func (kc *KafkaConsumer) processTaskByTaskMessage(ctx context.Context, taskMessage *TaskMessage) error {
	// Acquire a per-task lock to prevent concurrent processing of the same task.
	lockName := fmt.Sprintf("archive-task-%s", taskMessage.TaskID)
	_, err := kc.lockManager.AcquireLock(ctx, lockName)
	if err != nil {
		if err == archive.ErrLockAlreadyHeld {
			log.Printf("Task %s is already being processed", taskMessage.TaskID)
			return nil // Not an error: another worker owns this task.
		}
		return fmt.Errorf("failed to acquire task lock: %w", err)
	}
	defer func() {
		if err := kc.lockManager.ReleaseLock(lockName); err != nil {
			log.Printf("Failed to release task lock %s: %v", lockName, err)
		}
	}()

	// Locate the waiting metadata file via the repository path and file name.
	entries, taskID, filePath, err := kc.findMetadataFileByTaskMessage(taskMessage)
	if err != nil {
		return fmt.Errorf("failed to find metadata file: %w", err)
	}

	if len(entries) == 0 {
		log.Printf("No entries found for task %s", taskMessage.TaskID)
		return nil
	}

	log.Printf("Processing task %s with %d entries", taskID, len(entries))

	// Action and ref type are taken from the first entry.
	// NOTE(review): assumes every entry in one file shares the same action
	// and ref type — confirm against the producer.
	action := entries[0].Action
	refType := archive.ParseRefType(entries[0].SourceRefName)

	// Process the references in batches.
	return kc.processTaskWithBatch(ctx, entries, action, taskMessage.RepositoryPath, refType, taskID, filePath)
}

// processTaskByID processes a task identified only by its task ID
// (backward-compatible path): it scans all storages for matching metadata
// files and processes each one in turn, stopping on the first failure.
func (kc *KafkaConsumer) processTaskByID(ctx context.Context, taskID string) error {
	// Locate every metadata file whose name references this task.
	files, err := kc.findMetadataFilesByTaskID(taskID)
	if err != nil {
		return fmt.Errorf("failed to find metadata files for task %s: %w", taskID, err)
	}

	if len(files) == 0 {
		log.Printf("No metadata files found for task_id: %s", taskID)
		return nil
	}

	log.Printf("Found %d metadata files for task_id: %s", len(files), taskID)

	for _, file := range files {
		if err := kc.processMetadataFile(ctx, file, taskID); err != nil {
			return fmt.Errorf("failed to process metadata file %s: %w", file.FilePath, err)
		}
	}

	return nil
}

// MetadataFileInfo describes a located archive metadata file and the
// repository context parsed from its path.
type MetadataFileInfo struct {
	FilePath   string                  // absolute path of the metadata JSON file
	RepoPath   string                  // repository path (the part before /info/archive/refs/)
	RefType    string                  // ref type segment (e.g. heads, tags)
	Status     archive.OperationStatus // status segment (e.g. waiting, doing)
	Action     archive.OperationAction // action derived from the file name suffix
}

// findMetadataFilesByTaskID walks every configured storage looking for
// archive metadata JSON files whose name contains the given task ID.
//
// Walk errors on individual entries are skipped so one unreadable path
// does not abort the scan; an error walking a whole storage is logged and
// the remaining storages are still scanned. (The loop variable was renamed
// so it no longer shadows the imported storage package.)
func (kc *KafkaConsumer) findMetadataFilesByTaskID(taskID string) ([]MetadataFileInfo, error) {
	var metadataFiles []MetadataFileInfo

	for _, storageCfg := range kc.cfg.Storages {
		storageRoot := storageCfg.Path

		// Walk every repository under this storage root.
		err := filepath.Walk(storageRoot, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return nil // Skip unreadable entries and keep walking.
			}

			// Only metadata JSON files are of interest.
			if !strings.HasSuffix(path, ".json") {
				return nil
			}

			// The file must live under a repository's archive refs directory.
			// NOTE(review): matches "/" literally, assuming Unix path separators.
			if !strings.Contains(path, "/info/archive/refs/") {
				return nil
			}

			// The file name must reference the requested task.
			if !strings.Contains(info.Name(), taskID) {
				return nil
			}

			// Derive repository path, ref type, status and action from the path.
			if repoPath, refType, status, action := kc.parseMetadataFilePath(path); repoPath != "" {
				metadataFiles = append(metadataFiles, MetadataFileInfo{
					FilePath: path,
					RepoPath: repoPath,
					RefType:  refType,
					Status:   status,
					Action:   action,
				})
			}

			return nil
		})

		if err != nil {
			log.Printf("Error walking storage path %s: %v", storageRoot, err)
		}
	}

	return metadataFiles, nil
}

// parseMetadataFilePath parses an archive metadata file path of the form
//
//	<repo>.git/info/archive/refs/<ref-type>/<status>/refs-list-<timestamp>-<task_id>-<action>.json
//
// and returns the repository path, ref type, status and action. All return
// values are empty when the path does not match the expected layout.
func (kc *KafkaConsumer) parseMetadataFilePath(filePath string) (repoPath, refType string, status archive.OperationStatus, action archive.OperationAction) {
	const marker = "/info/archive/refs/"

	archiveIdx := strings.Index(filePath, marker)
	if archiveIdx == -1 {
		return "", "", "", ""
	}

	// Everything before the marker is the repository path, which must be a
	// bare repository directory.
	repoPath = filePath[:archiveIdx]
	if !strings.HasSuffix(repoPath, ".git") {
		return "", "", "", ""
	}

	// The remainder is <ref-type>/<status>/<file-name>.
	remaining := filePath[archiveIdx+len(marker):]
	parts := strings.Split(remaining, "/")
	if len(parts) < 3 {
		return "", "", "", ""
	}

	refType = parts[0]
	status = archive.OperationStatus(parts[1])

	// Derive the action from the file-name suffix. HasSuffix (instead of
	// Contains) avoids misclassifying a name that merely embeds an action
	// token mid-string.
	fileName := parts[len(parts)-1]
	switch {
	case strings.HasSuffix(fileName, "-archive.json"):
		action = archive.ActionArchive
	case strings.HasSuffix(fileName, "-restore.json"):
		action = archive.ActionRestore
	case strings.HasSuffix(fileName, "-delete.json"):
		action = archive.ActionDelete
	}

	return repoPath, refType, status, action
}

// processMetadataFile reads, validates and processes one metadata file for
// the given task, promoting it from waiting to doing first when necessary.
func (kc *KafkaConsumer) processMetadataFile(ctx context.Context, metadataFile MetadataFileInfo, taskID string) error {
	log.Printf("Processing metadata file: %s", metadataFile.FilePath)

	entries, fileTaskID, err := kc.storageManager.ReadMetadataFromFile(metadataFile.FilePath)
	if err != nil {
		return fmt.Errorf("failed to read metadata file: %w", err)
	}

	// A mismatched task ID means the file belongs to a different task; skip it.
	if fileTaskID != taskID {
		log.Printf("Task ID mismatch in file %s: expected %s, got %s", metadataFile.FilePath, taskID, fileTaskID)
		return nil
	}

	if len(entries) == 0 {
		log.Printf("No entries found in metadata file: %s", metadataFile.FilePath)
		return nil
	}

	// Move waiting files into the doing state before processing.
	if metadataFile.Status == archive.StatusWaiting {
		err := kc.storageManager.MoveMetadata(metadataFile.RepoPath, metadataFile.RefType,
			archive.StatusWaiting, archive.StatusDoing, taskID)
		if err != nil {
			return fmt.Errorf("failed to move metadata to doing: %w", err)
		}
	}

	// Process the file's references in batches.
	if err := kc.processTaskWithBatch(ctx, entries, metadataFile.Action, metadataFile.RepoPath, metadataFile.RefType, taskID, metadataFile.FilePath); err != nil {
		return fmt.Errorf("failed to process batch: %w", err)
	}

	return nil
}

// processTaskWithBatch processes the task's entries in chunks of at most
// MaxBatchSize references, then records the results under the finished
// status and removes the original waiting file.
func (kc *KafkaConsumer) processTaskWithBatch(ctx context.Context, entries []archive.MetadataEntry, action archive.OperationAction, repoPath, refType, taskID, originalFilePath string) error {
	total := len(entries)
	log.Printf("Processing %d entries for task %s in batches of %d", total, taskID, MaxBatchSize)

	var succeeded, failedOut []archive.MetadataEntry

	for start := 0; start < total; start += MaxBatchSize {
		end := start + MaxBatchSize
		if end > total {
			end = total
		}

		log.Printf("Processing batch %d-%d of %d entries", start+1, end, total)

		ok, bad := kc.processBatch(ctx, entries[start:end], action, repoPath)
		succeeded = append(succeeded, ok...)
		failedOut = append(failedOut, bad...)

		// Brief pause between batches to give the system some breathing room.
		if end < total {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(100 * time.Millisecond):
			}
		}
	}

	log.Printf("Task %s completed: %d successful, %d failed", taskID, len(succeeded), len(failedOut))

	// Record results under the finished status.
	// NOTE(review): successful and failed entries are written in two calls
	// with the same status and task ID — confirm WriteMetadata appends
	// rather than overwrites.
	if len(succeeded) > 0 {
		if err := kc.storageManager.WriteMetadata(repoPath, succeeded, archive.StatusFinished, taskID); err != nil {
			log.Printf("Failed to write successful metadata: %v", err)
		}
	}
	if len(failedOut) > 0 {
		if err := kc.storageManager.WriteMetadata(repoPath, failedOut, archive.StatusFinished, taskID); err != nil {
			log.Printf("Failed to write failed metadata: %v", err)
		}
	}

	// Remove the original waiting file now that its entries are recorded.
	if err := os.Remove(originalFilePath); err != nil {
		log.Printf("Failed to remove original waiting file %s: %v", originalFilePath, err)
	}

	// Compact the finished metadata files.
	if err := kc.storageManager.CompactFinishedMetadata(repoPath, refType); err != nil {
		log.Printf("Failed to compact finished metadata: %v", err)
	}

	return nil
}

// processBatch runs one batch git operation over all entries and returns
// the entries partitioned into successful and failed, each stamped with
// its final status, error details and update time.
func (kc *KafkaConsumer) processBatch(ctx context.Context, entries []archive.MetadataEntry, action archive.OperationAction, repoPath string) (successful, failed []archive.MetadataEntry) {
	// NOTE(review): StorageName is left empty and should be inferred from
	// repoPath — confirm how the storage name is meant to be resolved.
	repo := &repository.Repository{
		StorageName:  "",
		RelativePath: filepath.Base(repoPath),
	}

	// Execute the batch git operation for the requested action.
	var batchErr error
	switch action {
	case archive.ActionArchive:
		batchErr = kc.gitOperations.BatchArchiveRefs(ctx, repo, entries)
	case archive.ActionRestore:
		batchErr = kc.gitOperations.BatchRestoreRefs(ctx, repo, entries)
	case archive.ActionDelete:
		batchErr = kc.gitOperations.BatchDeleteRefs(ctx, repo, entries)
	default:
		batchErr = fmt.Errorf("unknown action: %s", action)
	}

	// The batch either succeeds or fails as a whole: mark every entry
	// accordingly (the range copies each entry, so the input is untouched).
	if batchErr != nil {
		log.Printf("Batch operation failed for action %s: %v", action, batchErr)
		for _, e := range entries {
			e.Status = archive.StatusFinished
			e.ErrorMessage = batchErr.Error()
			e.ErrorCode = 1
			e.UpdateAt = time.Now()
			failed = append(failed, e)
		}
		return successful, failed
	}

	log.Printf("Batch operation succeeded for action %s with %d entries", action, len(entries))
	for _, e := range entries {
		e.Status = archive.StatusFinished
		e.ErrorMessage = ""
		e.ErrorCode = 0
		e.UpdateAt = time.Now()
		successful = append(successful, e)
	}

	return successful, failed
}

// processEntry applies the git operation matching action to a single ref.
func (kc *KafkaConsumer) processEntry(ctx context.Context, repo repository.GitRepo, entry *archive.MetadataEntry, action archive.OperationAction) error {
	// Select the single-ref operation for this action.
	var op func(context.Context, repository.GitRepo, *archive.MetadataEntry) error
	switch action {
	case archive.ActionArchive:
		op = kc.gitOperations.ArchiveRef
	case archive.ActionRestore:
		op = kc.gitOperations.RestoreRef
	case archive.ActionDelete:
		op = kc.gitOperations.DeleteRef
	default:
		return fmt.Errorf("unknown action: %s", action)
	}

	return op(ctx, repo, entry)
}

// IsRunning reports whether the consumer is currently running.
func (kc *KafkaConsumer) IsRunning() bool {
	kc.mu.RLock()
	running := kc.running
	kc.mu.RUnlock()
	return running
}

// findMetadataFileByTaskMessage locates the waiting metadata file named in
// the TaskMessage by probing every known ref-type directory under the
// repository, and returns its entries, the task ID recorded in the file,
// and the file path.
//
// Fixes a compile error: repoPath was assigned but never used; it is now
// used to build the candidate path. The single-element possiblePaths slice
// was also flattened into a direct path.
func (kc *KafkaConsumer) findMetadataFileByTaskMessage(taskMessage *TaskMessage) ([]archive.MetadataEntry, string, string, error) {
	repoPath := taskMessage.RepositoryPath
	fileName := taskMessage.MetadataFileName

	// Probe every ref type the archive layout supports.
	refTypes := []string{"heads", "tags", "keep-around", "merge-requests"}

	for _, refType := range refTypes {
		// Only waiting files are candidates for processing.
		filePath := filepath.Join(repoPath, "info", "archive", "refs", refType, "waiting", fileName)
		if _, err := os.Stat(filePath); err != nil {
			continue
		}

		data, err := ioutil.ReadFile(filePath)
		if err != nil {
			continue
		}

		entries, taskID, err := archive.DeserializeMetadataByFormat(data)
		if err != nil {
			log.Printf("Failed to deserialize metadata from %s: %v", filePath, err)
			continue
		}

		// NOTE(review): task IDs are matched by substring in either
		// direction — confirm whether an exact match is intended.
		if strings.Contains(taskMessage.TaskID, taskID) || strings.Contains(taskID, taskMessage.TaskID) {
			return entries, taskID, filePath, nil
		}
	}

	return nil, "", "", fmt.Errorf("metadata file not found for task %s", taskMessage.TaskID)
}
