package archive

import (
	"context"
	"fmt"
	"io/fs"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"time"

	"gitlab.com/gitlab-org/gitaly/v14/internal/git"
	"gitlab.com/gitlab-org/gitaly/v14/internal/git/localrepo"
	"gitlab.com/gitlab-org/gitaly/v14/internal/git/repository"
	"gitlab.com/gitlab-org/gitaly/v14/internal/gitaly/config"
	"gitlab.com/gitlab-org/gitaly/v14/internal/storage"
)

const (
	// MaxMetadataFileEntries is the maximum number of entries per metadata file.
	MaxMetadataFileEntries = 10000

	// MaxWaitingFileEntries is the maximum number of entries per waiting
	// metadata file; larger batches are split across multiple files.
	MaxWaitingFileEntries = 200

	// ArchiveSuffix is the suffix added to archive repository directories
	// (inserted before a trailing ".git" when present).
	ArchiveSuffix = "-archived"
)

// StorageManager manages archive repository storage and metadata
type StorageManager struct {
	cfg           config.Cfg         // Gitaly node configuration, forwarded to localrepo handles
	locator       storage.Locator    // resolves repositories to their on-disk paths
	gitCmdFactory git.CommandFactory // spawns git commands (used to init archive repos)
}

// NewStorageManager builds a StorageManager wired with the given
// configuration, path locator, and git command factory.
func NewStorageManager(cfg config.Cfg, locator storage.Locator, gitCmdFactory git.CommandFactory) *StorageManager {
	manager := new(StorageManager)
	manager.cfg = cfg
	manager.locator = locator
	manager.gitCmdFactory = gitCmdFactory
	return manager
}

// GetArchiveRepoPath derives the archive repository path for a main
// repository path by appending ArchiveSuffix; a trailing ".git" extension
// is preserved after the suffix.
func (sm *StorageManager) GetArchiveRepoPath(mainRepoPath string) string {
	const gitExt = ".git"

	if !strings.HasSuffix(mainRepoPath, gitExt) {
		return mainRepoPath + ArchiveSuffix
	}

	return strings.TrimSuffix(mainRepoPath, gitExt) + ArchiveSuffix + gitExt
}

// EnsureArchiveRepo ensures the archive repository shadowing mainRepo
// (main path plus ArchiveSuffix) exists on disk, creating and initializing
// it as a bare repository when missing, and returns a localrepo handle to it.
func (sm *StorageManager) EnsureArchiveRepo(ctx context.Context, mainRepo repository.GitRepo) (*localrepo.Repo, error) {
	// Resolve the main repository's absolute on-disk path via the locator.
	mainRepoPath, err := sm.locator.GetRepoPath(mainRepo)
	if err != nil {
		return nil, fmt.Errorf("failed to get main repo path: %w", err)
	}

	archiveRepoPath := sm.GetArchiveRepoPath(mainRepoPath)

	// Check if archive repository exists
	if _, err := os.Stat(archiveRepoPath); os.IsNotExist(err) {
		// Create archive repository
		if err := sm.createArchiveRepo(ctx, archiveRepoPath); err != nil {
			return nil, fmt.Errorf("failed to create archive repository: %w", err)
		}
	} else if err != nil {
		return nil, fmt.Errorf("failed to stat archive repository: %w", err)
	}

	// Create a repository object for the archive repo.
	// NOTE(review): RelativePath is set to filepath.Base(archiveRepoPath),
	// which is only correct when the repository sits directly under the
	// storage root; for nested relative paths the parent directories are
	// lost — confirm against how the locator resolves this repository.
	archiveRepo := &repository.Repository{
		StorageName:  mainRepo.GetStorageName(),
		RelativePath: filepath.Base(archiveRepoPath),
	}

	// NOTE(review): the second argument to localrepo.New is nil here —
	// presumably an optional collaborator (e.g. locator or catfile cache);
	// confirm nil is acceptable for this gitaly version.
	return localrepo.New(sm.gitCmdFactory, nil, archiveRepo, sm.cfg), nil
}

// createArchiveRepo creates the directory at repoPath and initializes it as
// a bare git repository via `git init --bare`.
func (sm *StorageManager) createArchiveRepo(ctx context.Context, repoPath string) error {
	// Create directory (including missing parents, mode 0755).
	if err := os.MkdirAll(repoPath, 0755); err != nil {
		return fmt.Errorf("failed to create archive repo directory: %w", err)
	}

	// Initialize as bare git repository.
	// NOTE(review): gitCmdFactory.New(...) followed by cmd.New(...) chains
	// two constructors — confirm this matches the CommandFactory API of this
	// gitaly version; typically the factory's New produces the runnable
	// command directly.
	cmd := sm.gitCmdFactory.New(ctx, nil, git.WithRefTxHook(sm.cfg, repository.Repository{}))
	initCmd, err := cmd.New(ctx, git.SubCmd{
		Name:  "init",
		Flags: []git.Option{git.Flag{Name: "--bare"}},
	}, git.WithDir(repoPath))
	if err != nil {
		return fmt.Errorf("failed to create git init command: %w", err)
	}

	// Wait for `git init --bare` to finish; a non-zero exit surfaces here.
	if err := initCmd.Wait(); err != nil {
		return fmt.Errorf("failed to initialize archive repository: %w", err)
	}

	return nil
}

// WriteMetadata writes metadata entries under repoPath, grouping them by the
// ref type parsed from each entry's source ref name and delegating one write
// per group.
func (sm *StorageManager) WriteMetadata(repoPath string, entries []MetadataEntry, status OperationStatus, taskID string) error {
	if len(entries) == 0 {
		return nil
	}

	// Bucket entries by their ref type so each type lands in its own file set.
	grouped := make(map[string][]MetadataEntry)
	for _, entry := range entries {
		key := ParseRefType(entry.SourceRefName)
		grouped[key] = append(grouped[key], entry)
	}

	for key, bucket := range grouped {
		err := sm.writeMetadataForRefType(repoPath, key, bucket, status, taskID)
		if err != nil {
			return fmt.Errorf("failed to write metadata for ref type %s: %w", key, err)
		}
	}

	return nil
}

// writeMetadataForRefType writes metadata for a specific ref type.
//
// Waiting-status entries are split into batches of at most
// MaxWaitingFileEntries, each batch written to its own file under a
// batch-suffixed task ID; every entry within one batch must share the same
// action. Any other status is written as a single file using the action of
// the first entry.
func (sm *StorageManager) writeMetadataForRefType(repoPath, refType string, entries []MetadataEntry, status OperationStatus, taskID string) error {
	if len(entries) == 0 {
		return nil
	}

	now := time.Now()

	// writeFile serializes one batch of entries and writes it to the metadata
	// path derived from the status, task ID, and action. Shared by both the
	// waiting and the finished code paths (previously duplicated).
	writeFile := func(batch []MetadataEntry, fileTaskID string, action OperationAction) error {
		metadataPath := MetadataPath(repoPath, refType, status, now, fileTaskID, action)

		// Ensure directory exists
		if err := os.MkdirAll(filepath.Dir(metadataPath), 0755); err != nil {
			return fmt.Errorf("failed to create metadata directory: %w", err)
		}

		metadataFile := &MetadataFile{
			Entries:   batch,
			Timestamp: now,
		}

		data, err := metadataFile.SerializeMetadata(status)
		if err != nil {
			return fmt.Errorf("failed to serialize metadata: %w", err)
		}

		if err := ioutil.WriteFile(metadataPath, data, 0644); err != nil {
			return fmt.Errorf("failed to write metadata file: %w", err)
		}
		return nil
	}

	if status != StatusWaiting {
		// Finished format: a single file holding the entry list directly.
		return writeFile(entries, taskID, entries[0].Action)
	}

	// Waiting status: split into fixed-size batches, one file per batch.
	for i := 0; i < len(entries); i += MaxWaitingFileEntries {
		end := i + MaxWaitingFileEntries
		if end > len(entries) {
			end = len(entries)
		}

		batch := entries[i:end]
		batchTaskID := fmt.Sprintf("%s-%d", taskID, i/MaxWaitingFileEntries)

		// Ensure all entries in this batch carry the same action — a single
		// metadata file encodes exactly one action.
		action := batch[0].Action
		for _, entry := range batch {
			if entry.Action != action {
				return fmt.Errorf("all entries in the same metadata file must have the same action")
			}
		}

		if err := writeFile(batch, batchTaskID, action); err != nil {
			return err
		}
	}

	return nil
}

// ReadMetadata collects all metadata entries stored under the given ref
// type's status directory. A missing directory yields (nil, nil) — it just
// means nothing has been written yet.
func (sm *StorageManager) ReadMetadata(repoPath, refType string, status OperationStatus) ([]MetadataEntry, error) {
	statusDir := filepath.Join(repoPath, "info", "archive", "refs", refType, string(status))

	if _, err := os.Stat(statusDir); err != nil {
		if os.IsNotExist(err) {
			return nil, nil // No metadata files
		}
		return nil, fmt.Errorf("failed to stat metadata directory: %w", err)
	}

	var collected []MetadataEntry

	// Visit every .json file below the status directory and accumulate its
	// deserialized entries.
	walkFn := func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if d.IsDir() || !strings.HasSuffix(path, ".json") {
			return nil
		}

		raw, readErr := ioutil.ReadFile(path)
		if readErr != nil {
			return fmt.Errorf("failed to read metadata file %s: %w", path, readErr)
		}

		fileEntries, _, parseErr := DeserializeMetadataByFormat(raw)
		if parseErr != nil {
			return fmt.Errorf("failed to deserialize metadata from %s: %w", path, parseErr)
		}

		collected = append(collected, fileEntries...)
		return nil
	}

	if err := filepath.WalkDir(statusDir, walkFn); err != nil {
		return nil, fmt.Errorf("failed to read metadata files: %w", err)
	}

	return collected, nil
}

// ReadMetadataFromFile loads one metadata file and returns its entries
// together with the task ID recorded inside it.
func (sm *StorageManager) ReadMetadataFromFile(filePath string) ([]MetadataEntry, string, error) {
	raw, readErr := ioutil.ReadFile(filePath)
	if readErr != nil {
		return nil, "", fmt.Errorf("failed to read file %s: %w", filePath, readErr)
	}

	entries, taskID, parseErr := DeserializeMetadataByFormat(raw)
	if parseErr != nil {
		return nil, "", fmt.Errorf("failed to deserialize metadata from %s: %w", filePath, parseErr)
	}

	return entries, taskID, nil
}

// MoveMetadata moves metadata files between status directories (typically
// waiting -> finished), rewriting each moved file in the destination
// status's format and deleting the source file afterwards.
//
// For waiting sources, only files whose name and recorded task ID match
// taskID are considered, and at most one file is processed. Unreadable or
// unparseable files are skipped (best effort) rather than aborting the move.
func (sm *StorageManager) MoveMetadata(repoPath, refType string, fromStatus, toStatus OperationStatus, taskID string) error {
	fromDir := filepath.Join(repoPath, "info", "archive", "refs", refType, string(fromStatus))
	toDir := filepath.Join(repoPath, "info", "archive", "refs", refType, string(toStatus))

	// Find the metadata file(s) with matching task ID.
	files, err := ioutil.ReadDir(fromDir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil // No files to move
		}
		return fmt.Errorf("failed to read from directory: %w", err)
	}

	for _, file := range files {
		if file.IsDir() || !strings.HasSuffix(file.Name(), ".json") {
			continue
		}

		// Waiting file names embed the task ID; use it as a cheap pre-filter.
		if fromStatus == StatusWaiting && !strings.Contains(file.Name(), taskID) {
			continue
		}

		filePath := filepath.Join(fromDir, file.Name())
		data, err := ioutil.ReadFile(filePath)
		if err != nil {
			continue // best effort: leave unreadable files in place
		}

		entries, fileTaskID, err := DeserializeMetadataByFormat(data)
		if err != nil {
			continue // best effort: leave unparseable files in place
		}

		// Verify the task ID recorded inside the file as well (waiting only).
		if fromStatus == StatusWaiting && !strings.Contains(fileTaskID, taskID) {
			continue
		}

		// BUG FIX: an empty entry list previously left metadataFile nil and
		// destPath empty, causing a nil dereference in SerializeMetadata and
		// a write to "". Skip such files instead.
		if len(entries) == 0 {
			continue
		}

		// Stamp every entry with the new status and update time.
		now := time.Now()
		for i := range entries {
			entries[i].Status = toStatus
			entries[i].UpdateAt = now
		}

		// Ensure destination directory exists.
		if err := os.MkdirAll(toDir, 0755); err != nil {
			return fmt.Errorf("failed to create destination directory: %w", err)
		}

		// Finished files use the refs-list-<timestamp>.json naming scheme;
		// any other destination status keeps the MetadataPath-derived name.
		var destPath string
		if toStatus == StatusFinished {
			destPath = filepath.Join(toDir, fmt.Sprintf("refs-list-%d.json", now.Unix()))
		} else {
			destPath = MetadataPath(repoPath, refType, toStatus, now, taskID, entries[0].Action)
		}

		metadataFile := &MetadataFile{
			Entries:   entries,
			Timestamp: now,
		}

		newData, err := metadataFile.SerializeMetadata(toStatus)
		if err != nil {
			return fmt.Errorf("failed to serialize updated metadata: %w", err)
		}

		if err := ioutil.WriteFile(destPath, newData, 0644); err != nil {
			return fmt.Errorf("failed to write destination file: %w", err)
		}

		// Remove source file only after the destination write succeeded.
		if err := os.Remove(filePath); err != nil {
			return fmt.Errorf("failed to remove source file: %w", err)
		}

		// For waiting files, we only process one file per task ID.
		if fromStatus == StatusWaiting {
			break
		}
	}

	return nil
}

// GetOldestWaitingMetadata returns the entries and task ID of the oldest
// waiting metadata file for the given ref type, or (nil, "", nil) when no
// waiting files exist. "Oldest" is determined by lexicographic file name
// order, which works because names embed a timestamp.
func (sm *StorageManager) GetOldestWaitingMetadata(repoPath, refType string) ([]MetadataEntry, string, error) {
	waitingDir := filepath.Join(repoPath, "info", "archive", "refs", refType, string(StatusWaiting))

	if _, err := os.Stat(waitingDir); err != nil {
		if os.IsNotExist(err) {
			return nil, "", nil // No waiting files
		}
		return nil, "", fmt.Errorf("failed to stat waiting directory: %w", err)
	}

	files, err := ioutil.ReadDir(waitingDir)
	if err != nil {
		return nil, "", fmt.Errorf("failed to read waiting directory: %w", err)
	}

	// Keep only regular .json files; directories and other files are ignored.
	var candidates []string
	for _, f := range files {
		if f.IsDir() || !strings.HasSuffix(f.Name(), ".json") {
			continue
		}
		candidates = append(candidates, f.Name())
	}

	if len(candidates) == 0 {
		return nil, "", nil // No files to process
	}

	sort.Strings(candidates)

	// Read the lexicographically smallest (oldest) file.
	oldestPath := filepath.Join(waitingDir, candidates[0])
	raw, err := ioutil.ReadFile(oldestPath)
	if err != nil {
		return nil, "", fmt.Errorf("failed to read oldest metadata file: %w", err)
	}

	entries, taskID, err := DeserializeMetadataByFormat(raw)
	if err != nil {
		return nil, "", fmt.Errorf("failed to deserialize oldest metadata: %w", err)
	}

	// Task ID is taken from the file content for waiting files.
	return entries, taskID, nil
}
// CompactFinishedMetadata merges small refs-list-<timestamp>.json files in
// the finished directory into larger files (capped near 10MB of source
// size), deleting the originals only after a merged file has been written.
// Unreadable or unparseable files are skipped and left in place.
func (sm *StorageManager) CompactFinishedMetadata(repoPath, refType string) error {
	finishedDir := filepath.Join(repoPath, "info", "archive", "refs", refType, string(StatusFinished))

	// Check if directory exists.
	if _, err := os.Stat(finishedDir); os.IsNotExist(err) {
		return nil // No finished files to compact
	} else if err != nil {
		return fmt.Errorf("failed to stat finished directory: %w", err)
	}

	files, err := ioutil.ReadDir(finishedDir)
	if err != nil {
		return fmt.Errorf("failed to read finished directory: %w", err)
	}

	// Collect files matching the refs-list-<timestamp>.json pattern.
	var compactableFiles []fs.FileInfo
	for _, file := range files {
		if file.IsDir() || !strings.HasSuffix(file.Name(), ".json") {
			continue
		}
		if strings.HasPrefix(file.Name(), "refs-list-") {
			compactableFiles = append(compactableFiles, file)
		}
	}

	if len(compactableFiles) <= 1 {
		return nil // Need at least 2 files to compact
	}

	// Oldest first, by the timestamp embedded in the filename.
	sort.Slice(compactableFiles, func(i, j int) bool {
		return extractTimestampFromFilename(compactableFiles[i].Name()) <
			extractTimestampFromFilename(compactableFiles[j].Name())
	})

	const maxFileSize = 10 * 1024 * 1024 // 10MB

	var (
		mergedEntries      []MetadataEntry
		currentSize        int64
		filesToRemove      []string
		compactedFileCount int
	)

	// flush writes the accumulated batch as one compacted file, deletes the
	// source files it came from, and resets the accumulator.
	flush := func() error {
		if len(mergedEntries) == 0 {
			return nil
		}
		if err := sm.saveCompactedFile(finishedDir, mergedEntries, &compactedFileCount); err != nil {
			return fmt.Errorf("failed to save compacted file: %w", err)
		}
		for _, pathToRemove := range filesToRemove {
			if err := os.Remove(pathToRemove); err != nil {
				return fmt.Errorf("failed to remove original file %s: %w", pathToRemove, err)
			}
		}
		mergedEntries = nil
		currentSize = 0
		filesToRemove = nil
		return nil
	}

	for _, file := range compactableFiles {
		filePath := filepath.Join(finishedDir, file.Name())

		data, err := ioutil.ReadFile(filePath)
		if err != nil {
			continue // best effort: leave unreadable files in place
		}

		entries, _, err := DeserializeMetadataByFormat(data)
		if err != nil {
			continue // best effort: leave unparseable files in place
		}

		// Flush the current batch before it would exceed the size limit.
		if currentSize+file.Size() > maxFileSize {
			if err := flush(); err != nil {
				return err
			}
		}

		// Add current file to the merge batch.
		mergedEntries = append(mergedEntries, entries...)
		currentSize += file.Size()
		filesToRemove = append(filesToRemove, filePath)
	}

	// BUG FIX: the final batch was previously written only when the very
	// last file deserialized successfully (the save was gated on the loop
	// index before the `continue` checks). Flushing after the loop
	// guarantees the remaining batch is always persisted.
	return flush()
}

// saveCompactedFile serializes the merged entries in the finished format and
// writes them to a fresh refs-list-<timestamp>.json file in finishedDir,
// incrementing *fileCount on success. The timestamp is advanced past any
// existing file so consecutive batches never share a name.
func (sm *StorageManager) saveCompactedFile(finishedDir string, entries []MetadataEntry, fileCount *int) error {
	if len(entries) == 0 {
		return nil
	}

	compactedFile := &MetadataFile{
		Entries:   entries,
		Timestamp: time.Now(),
	}

	data, err := compactedFile.SerializeMetadata(StatusFinished)
	if err != nil {
		return fmt.Errorf("failed to serialize compacted metadata: %w", err)
	}

	*fileCount++

	// BUG FIX: naming the file purely by the current Unix second meant two
	// batches written within the same second overwrote each other, losing
	// the first batch's entries. Probe for a free timestamp so every batch
	// gets a distinct file name (still matching refs-list-<timestamp>.json
	// so extractTimestampFromFilename keeps working).
	ts := time.Now().Unix()
	compactedPath := filepath.Join(finishedDir, fmt.Sprintf("refs-list-%d.json", ts))
	for {
		if _, err := os.Stat(compactedPath); os.IsNotExist(err) {
			break
		}
		ts++
		compactedPath = filepath.Join(finishedDir, fmt.Sprintf("refs-list-%d.json", ts))
	}

	if err := ioutil.WriteFile(compactedPath, data, 0644); err != nil {
		return fmt.Errorf("failed to write compacted file: %w", err)
	}

	return nil
}

// extractTimestampFromFilename parses the Unix timestamp embedded in a
// filename of the form refs-list-<timestamp>.json. It returns 0 when the
// name does not contain a parseable integer.
func extractTimestampFromFilename(filename string) int64 {
	trimmed := strings.TrimSuffix(strings.TrimPrefix(filename, "refs-list-"), ".json")

	if ts, err := strconv.ParseInt(trimmed, 10, 64); err == nil {
		return ts
	}
	return 0
}
