package service

import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"time"

	"jdfs2/internal/consts"
	"jdfs2/internal/multipart"
	"jdfs2/internal/storage"
	"jdfs2/internal/utils"
)

// S3Service defines the operations exposed to the HTTP layer: bucket
// lifecycle and configuration, object CRUD and listing, and multipart
// uploads. All methods take a context as their first argument, though the
// current implementation does not use it for cancellation.
type S3Service interface {
	// Bucket lifecycle.
	ListBuckets(ctx context.Context) ([]BucketInfo, error)
	CreateBucket(ctx context.Context, bucket string) error
	DeleteBucket(ctx context.Context, bucket string) error
	HeadBucket(ctx context.Context, bucket string) error
	// Bucket configuration (region, versioning, policy, ACL, lifecycle).
	GetBucketLocation(ctx context.Context, bucket string) (string, error)
	GetBucketVersioning(ctx context.Context, bucket string) (string, error)
	PutBucketVersioning(ctx context.Context, bucket, status string) error
	GetBucketPolicy(ctx context.Context, bucket string) (string, error)
	PutBucketPolicy(ctx context.Context, bucket, policy string) error
	DeleteBucketPolicy(ctx context.Context, bucket string) error
	GetBucketACL(ctx context.Context, bucket string) (string, error)
	PutBucketACL(ctx context.Context, bucket, acl string) error
	GetBucketLifecycleConfiguration(ctx context.Context, bucket string) (string, error)
	PutBucketLifecycleConfiguration(ctx context.Context, bucket, lifecycle string) error
	DeleteBucketLifecycleConfiguration(ctx context.Context, bucket string) error
	ListObjectsV2(ctx context.Context, bucket, prefix, delimiter, startAfter, continuationToken string, maxKeys int) (*ListObjectsV2Result, error)

	// Object CRUD. PutObject returns the object's ETag (hex MD5); GetObject
	// returns an open file handle the caller must close.
	PutObject(ctx context.Context, bucket, key string, body io.Reader, contentLength int64, contentType string, userMeta map[string]string, contentMD5 string) (string, error)
	GetObject(ctx context.Context, bucket, key string) (*os.File, *storage.DistributedObjectMeta, error)
	DeleteObject(ctx context.Context, bucket, key string) error
	HeadObject(ctx context.Context, bucket, key string) (*storage.DistributedObjectMeta, error)
	CopyObject(ctx context.Context, dstBucket, dstKey, srcBucket, srcKey string) (string, error)

	// Multipart uploads, delegated to the multipart.Manager.
	InitiateMultipart(ctx context.Context, bucket, key string) (string, error)
	UploadPart(ctx context.Context, uploadID string, partNumber int, body io.Reader, contentMD5 string) (string, int64, error)
	UploadPartCopy(ctx context.Context, uploadID string, partNumber int, srcBucket, srcKey string, start, end *int64) (string, int64, error)
	CompleteMultipart(ctx context.Context, bucket, key, uploadID string, parts []multipart.CompletePart) (string, error)
	AbortMultipart(ctx context.Context, uploadID string) error
	ListParts(ctx context.Context, uploadID string, partNumberMarker, maxParts int) ([]multipart.PartInfo, int, bool, error)
	ListMultipartUploads(ctx context.Context, bucket, prefix string, maxUploads int) ([]multipart.UploadInfo, bool, error)
	// Close releases service-owned resources (currently the meta store).
	Close() error
}

// s3Service is the concrete S3Service backed by a local directory tree,
// an append-only volume store for object bytes, and a meta store for
// object/bucket metadata.
type s3Service struct {
	fs            *storage.FS                   // plain-filesystem object store (legacy/fallback path)
	appendStorage *storage.AppendStorage        // append-only volume store for new objects
	meta          *storage.DistributedMetaStore // object and bucket metadata
	root          string                        // root data directory (S3_ROOT or runtime/s3-data)
	mpu           *multipart.Manager            // multipart upload staging under root/.uploads
}

var s3Impl *s3Service

// S3 returns the process-wide S3Service, constructing it on first use.
// Simple singleton pattern for the teaching project (not goroutine-safe).
// Configuration comes from two environment variables: S3_ROOT (data
// directory, default "runtime/s3-data") and S3_VOLUME_SIZE (append-volume
// size in bytes, default 200MB). Initialization failures are fatal.
func S3() S3Service {
	if s3Impl != nil {
		return s3Impl
	}

	// Prefer environment variable S3_ROOT, fallback to project runtime path.
	rootPath := os.Getenv("S3_ROOT")
	if rootPath == "" {
		rootPath = "runtime/s3-data"
	}

	// Get volume size from environment variable, default to 200MB.
	// strconv.ParseInt rejects trailing garbage (e.g. "123abc") and
	// non-numeric input, which the previous fmt.Sscanf("%d") parse silently
	// accepted; non-positive sizes are also rejected.
	volumeSize := int64(200 * 1024 * 1024)
	if volumeSizeStr := os.Getenv("S3_VOLUME_SIZE"); volumeSizeStr != "" {
		size, err := strconv.ParseInt(volumeSizeStr, 10, 64)
		if err != nil || size <= 0 {
			log.Printf("Invalid S3_VOLUME_SIZE %s, using default 200MB", volumeSizeStr)
		} else {
			volumeSize = size
		}
	}

	// Ensure the root directory exists.
	if err := os.MkdirAll(rootPath, 0755); err != nil {
		log.Fatalf("Failed to create s3.root directory %s: %v", rootPath, err)
	}

	// Metadata lives under a hidden directory so it is never listed as a bucket.
	metaPath := filepath.Join(rootPath, ".meta", "badger")
	if err := os.MkdirAll(metaPath, 0755); err != nil {
		log.Fatalf("Failed to create meta directory %s: %v", metaPath, err)
	}
	metaStore, err := storage.NewDistributedMetaStore(metaPath, "node-001")
	if err != nil {
		log.Fatalf("Failed to initialize DistributedMetaStore: %v", err)
	}

	// Initialize append storage.
	appendStorage, err := storage.NewAppendStorage(rootPath, volumeSize)
	if err != nil {
		log.Fatalf("Failed to initialize AppendStorage: %v", err)
	}

	s3Impl = &s3Service{
		fs:            storage.NewFS(rootPath),
		appendStorage: appendStorage,
		meta:          metaStore,
		root:          rootPath,
		mpu:           multipart.NewManager(filepath.Join(rootPath, ".uploads")),
	}

	return s3Impl
}

// CreateBucket creates the bucket's backing directory under root.
// Returns utils.ErrBucketAlreadyExists when the bucket is already present.
func (s *s3Service) CreateBucket(ctx context.Context, bucketName string) error {
	// In S3, creating a bucket is implicitly done by the first object creation
	// or can be done explicitly. Here we represent it by creating a directory.
	bucketPath := filepath.Join(s.root, bucketName)

	// Distinguish the three stat outcomes. The original used
	// !os.IsNotExist(err), which reported "bucket already exists" for any
	// unrelated stat failure (e.g. permission denied).
	if _, err := os.Stat(bucketPath); err == nil {
		// Bucket already exists. Return the pre-defined S3 error.
		return utils.ErrBucketAlreadyExists
	} else if !os.IsNotExist(err) {
		return fmt.Errorf("failed to stat bucket directory for '%s': %w", bucketName, err)
	}

	if err := os.Mkdir(bucketPath, 0755); err != nil {
		return fmt.Errorf("failed to create bucket directory for '%s': %w", bucketName, err)
	}

	log.Printf("Successfully created bucket '%s'", bucketName)
	return nil
}

// PutObject handles the business logic of uploading an object.
// It streams the body into append storage while computing the MD5 (the ETag),
// verifies the client's Content-MD5 (if given) BEFORE publishing the object,
// then creates a ListObjectsV2 placeholder file and persists the metadata.
// On a digest mismatch nothing visible is left behind; the already-appended
// bytes are reclaimed later by garbage collection.
//
// NOTE(review): contentMD5 is compared against the hex-encoded digest, so
// the HTTP handler is assumed to have converted the base64 Content-MD5
// header into hex — confirm against the handler.
func (s *s3Service) PutObject(ctx context.Context, bucket, key string, data io.Reader, size int64, contentType string, userMeta map[string]string, contentMD5 string) (string, error) {
	// First, check if the bucket exists.
	bucketPath := filepath.Join(s.root, bucket)
	log.Printf("Checking for bucket existence at path: %s", bucketPath)
	if _, err := os.Stat(bucketPath); os.IsNotExist(err) {
		log.Printf("WARN: Bucket existence check failed for path '%s': directory does not exist.", bucketPath)
		return "", utils.ErrNoSuchBucket
	}

	// Use an io.TeeReader to calculate the MD5 hash on the fly while writing.
	hash := md5.New()
	teeReader := io.TeeReader(data, hash)

	// Store the object in append storage.
	segments, err := s.appendStorage.PutObject(bucket, key, teeReader, size)
	if err != nil {
		return "", fmt.Errorf("failed to put object in append storage: %w", err)
	}

	// The whole body has been consumed by PutObject, so the digest is final.
	etag := hex.EncodeToString(hash.Sum(nil))

	// Verify Content-MD5 before creating the placeholder or metadata. The
	// original checked after creating the placeholder, which left a visible
	// but meta-less object behind on a BadDigest failure.
	if contentMD5 != "" {
		if etag != contentMD5 {
			log.Printf("Content-MD5 mismatch for %s/%s: expected %s, got %s", bucket, key, contentMD5, etag)
			return "", utils.NewS3Error(400, "BadDigest", "The Content-MD5 you specified did not match what we received.")
		}
		log.Printf("Content-MD5 verified for %s/%s: %s", bucket, key, etag)
	}

	// Create a placeholder file in the bucket directory for ListObjectsV2 compatibility.
	placeholderPath := filepath.Join(s.root, bucket, key)
	if err := os.MkdirAll(filepath.Dir(placeholderPath), 0755); err != nil {
		log.Printf("WARN: Failed to create placeholder directory for %s/%s: %v", bucket, key, err)
	} else {
		// Create an empty placeholder file.
		if err := os.WriteFile(placeholderPath, []byte{}, 0644); err != nil {
			log.Printf("WARN: Failed to create placeholder file for %s/%s: %v", bucket, key, err)
		}
	}

	// Create and store the metadata.
	meta := storage.NewDistributedObjectMeta()
	meta.Size = size
	meta.ETag = etag
	meta.LastModified = time.Now().Unix()
	meta.ContentType = contentType
	meta.UserMeta = userMeta
	meta.Segments = segments

	// Backward compatibility: mirror a single segment into the legacy fields.
	if len(segments) == 1 {
		meta.VolumeID = int32(segments[0].VolumeID)
		meta.Offset = segments[0].Offset
	}

	err = s.meta.PutMeta(bucket, key, meta)
	if err != nil {
		// The data is written but metadata failed. A robust implementation
		// would have a cleanup/rollback mechanism; for this teaching project
		// we just log the inconsistency.
		log.Printf("CRITICAL: Object data for %s/%s was written, but metadata failed to save: %v", bucket, key, err)
		return "", fmt.Errorf("failed to put object metadata: %w", err)
	}

	if len(segments) == 1 {
		log.Printf("Successfully uploaded object %s/%s with ETag %s to volume %d at offset %d", bucket, key, etag, segments[0].VolumeID, segments[0].Offset)
	} else {
		log.Printf("Successfully uploaded object %s/%s with ETag %s across %d segments", bucket, key, etag, len(segments))
	}

	return etag, nil
}

// readerToTempFile drains r into a fresh temporary file, closes r, and
// returns the file rewound to offset 0. On any error r is closed and the
// temp file (if created) is removed. The caller owns the returned file and
// must close it; note the file is NOT removed automatically on close.
func readerToTempFile(r io.ReadCloser) (*os.File, error) {
	tmpFile, err := os.CreateTemp("", "jdfs2_get_*")
	if err != nil {
		r.Close()
		return nil, fmt.Errorf("failed to create temp file: %w", err)
	}

	_, err = io.Copy(tmpFile, r)
	r.Close()
	if err != nil {
		tmpFile.Close()
		os.Remove(tmpFile.Name())
		return nil, fmt.Errorf("failed to copy data to temp file: %w", err)
	}

	if _, err := tmpFile.Seek(0, io.SeekStart); err != nil {
		tmpFile.Close()
		os.Remove(tmpFile.Name())
		return nil, fmt.Errorf("failed to seek temp file: %w", err)
	}
	return tmpFile, nil
}

// GetObject retrieves an object's data and metadata.
// It returns an open file handle for streaming plus the object's metadata.
// Append-storage objects (segmented or legacy single-volume) are staged into
// a temp file because the interface requires an *os.File; filesystem-backed
// objects are opened directly. The caller must close the returned file.
func (s *s3Service) GetObject(ctx context.Context, bucket, key string) (*os.File, *storage.DistributedObjectMeta, error) {
	// Retrieve the metadata first; this also establishes existence.
	meta, err := s.meta.GetMeta(bucket, key)
	if err != nil {
		return nil, nil, err // This will be ErrNoSuchKey if not found.
	}

	switch {
	case len(meta.Segments) > 0:
		// New multi-segment append-storage object.
		reader, err := s.appendStorage.GetObject(bucket, key, meta.Segments)
		if err != nil {
			log.Printf("CRITICAL: Metadata for %s/%s exists, but object data is missing from segments: %v", bucket, key, err)
			return nil, nil, err
		}
		f, err := readerToTempFile(reader)
		if err != nil {
			return nil, nil, err
		}
		return f, meta, nil

	case meta.VolumeID > 0:
		// Backward compatibility: legacy single-volume object.
		reader, err := s.appendStorage.GetObjectLegacy(bucket, key, int(meta.VolumeID), meta.Offset, meta.Size)
		if err != nil {
			log.Printf("CRITICAL: Metadata for %s/%s exists, but object data is missing from volume %d: %v", bucket, key, meta.VolumeID, err)
			return nil, nil, err
		}
		f, err := readerToTempFile(reader)
		if err != nil {
			return nil, nil, err
		}
		return f, meta, nil

	default:
		// Fallback to filesystem storage for backward compatibility.
		file, err := s.fs.GetObject(bucket, key)
		if err != nil {
			// Unexpected: metadata existence should imply file existence.
			log.Printf("CRITICAL: Metadata for %s/%s exists, but object data is missing: %v", bucket, key, err)
			return nil, nil, err
		}
		return file, meta, nil
	}
}

// removePlaceholder deletes the empty placeholder file that PutObject creates
// for ListObjectsV2 compatibility; an already-missing file is not an error.
func (s *s3Service) removePlaceholder(bucket, key string) {
	placeholderPath := filepath.Join(s.root, bucket, key)
	if err := os.Remove(placeholderPath); err != nil && !os.IsNotExist(err) {
		log.Printf("WARN: Failed to delete placeholder file for %s/%s: %v", bucket, key, err)
	}
}

// DeleteObject handles the business logic for deleting an object.
// In append mode we move the metadata to the deleted-records table instead of
// discarding it, so garbage collection can later reclaim the dead bytes;
// only the placeholder file is removed immediately. Deleting a non-existent
// object is a no-op (S3 deletes are idempotent).
func (s *s3Service) DeleteObject(ctx context.Context, bucket, key string) error {
	// Get metadata first to check how the object is stored.
	meta, err := s.meta.GetMeta(bucket, key)
	if err != nil {
		// Object doesn't exist, which is fine for S3 (idempotent).
		return nil
	}

	// Create a deleted record from the metadata so GC can locate the data.
	deletedRecord := &storage.DeletedObjectRecord{
		Bucket:       bucket,
		Key:          key,
		DeletedAt:    time.Now(),
		Size:         meta.Size,
		ETag:         meta.ETag,
		LastModified: time.Unix(meta.LastModified, 0),
		ContentType:  meta.ContentType,
		UserMeta:     meta.UserMeta,
		VolumeID:     int(meta.VolumeID),
		Offset:       meta.Offset,
		Segments:     meta.Segments,
	}

	// Save the deleted record before removing the original metadata.
	if err := s.meta.PutDeletedRecord(bucket, key, deletedRecord); err != nil {
		log.Printf("WARN: Failed to save deleted record for %s/%s: %v", bucket, key, err)
		return fmt.Errorf("failed to save deleted record: %w", err)
	}

	switch {
	case len(meta.Segments) > 0:
		// Multi-segment append-storage object: data stays in its volumes.
		volumeIDs := make([]int, len(meta.Segments))
		for i, seg := range meta.Segments {
			volumeIDs[i] = seg.VolumeID
		}
		log.Printf("Deleting append storage object %s/%s from volumes %v (data remains in volumes for garbage collection)", bucket, key, volumeIDs)
		s.removePlaceholder(bucket, key)
	case meta.VolumeID > 0:
		// Backward compatibility: legacy single-volume object.
		log.Printf("Deleting append storage object %s/%s from volume %d (data remains in volume for garbage collection)", bucket, key, meta.VolumeID)
		s.removePlaceholder(bucket, key)
	default:
		// Fallback to filesystem storage for backward compatibility.
		if err := s.fs.DeleteObject(bucket, key); err != nil {
			log.Printf("WARN: Failed to delete object data for %s/%s: %v", bucket, key, err)
			return fmt.Errorf("failed to delete object data: %w", err)
		}
	}

	// Delete the original metadata (now that we have the deleted record).
	if err := s.meta.DeleteMeta(bucket, key); err != nil {
		log.Printf("WARN: Failed to delete object metadata for %s/%s: %v", bucket, key, err)
		return fmt.Errorf("failed to delete object metadata: %w", err)
	}

	log.Printf("Successfully deleted object %s/%s (metadata moved to deleted records)", bucket, key)
	return nil
}

// CopyObject copies srcBucket/srcKey to dstBucket/dstKey and returns the new
// object's ETag. The destination is written to the plain filesystem path (no
// append-storage segments), so its metadata carries no segment info and
// GetObject will serve it straight from disk.
func (s *s3Service) CopyObject(ctx context.Context, dstBucket, dstKey, srcBucket, srcKey string) (string, error) {
	// Read source (works for append-storage and fs-backed objects alike).
	sf, srcMeta, err := s.GetObject(ctx, srcBucket, srcKey)
	if err != nil {
		return "", err
	}
	defer sf.Close()

	dstPath := filepath.Join(s.root, dstBucket, dstKey)
	if err := os.MkdirAll(filepath.Dir(dstPath), 0755); err != nil {
		return "", err
	}

	// Write to a temp file and rename so a failed copy never leaves a
	// partial destination object.
	tmp := dstPath + ".copy.tmp"
	out, err := os.Create(tmp)
	if err != nil {
		return "", err
	}
	hash := md5.New()
	n, err := io.Copy(io.MultiWriter(out, hash), sf)
	if cerr := out.Close(); err == nil {
		// Surface close/flush errors; the original discarded them.
		err = cerr
	}
	if err != nil {
		_ = os.Remove(tmp)
		return "", err
	}
	if err := os.Rename(tmp, dstPath); err != nil {
		// The original leaked the temp file on rename failure.
		_ = os.Remove(tmp)
		return "", err
	}

	etag := hex.EncodeToString(hash.Sum(nil))
	newMeta := storage.NewDistributedObjectMeta()
	newMeta.Size = n
	newMeta.ETag = etag
	newMeta.LastModified = time.Now().Unix()
	newMeta.ContentType = srcMeta.ContentType
	newMeta.UserMeta = srcMeta.UserMeta
	if err := s.meta.PutMeta(dstBucket, dstKey, newMeta); err != nil {
		return "", err
	}
	return etag, nil
}

// Multipart APIs

// InitiateMultipart starts a multipart upload for bucket/key and hands back
// the upload ID assigned by the multipart manager. The bucket must exist.
func (s *s3Service) InitiateMultipart(ctx context.Context, bucket, key string) (string, error) {
	// Same existence check (and ErrNoSuchBucket result) as the rest of the
	// bucket-scoped operations.
	if err := s.HeadBucket(ctx, bucket); err != nil {
		return "", err
	}
	return s.mpu.Initiate(bucket, key)
}

// UploadPart streams one part of a multipart upload into the staging area and
// returns the part's ETag and byte count. When the client supplied a
// Content-MD5, the stored part's ETag must match it or a BadDigest error is
// returned.
func (s *s3Service) UploadPart(ctx context.Context, uploadID string, partNumber int, r io.Reader, contentMD5 string) (string, int64, error) {
	etag, written, err := s.mpu.UploadPart(uploadID, partNumber, r)
	if err != nil {
		return "", 0, err
	}

	// No digest supplied: nothing more to verify.
	if contentMD5 == "" {
		return etag, written, nil
	}

	if etag != contentMD5 {
		log.Printf("Content-MD5 mismatch for part %d of upload %s: expected %s, got %s", partNumber, uploadID, contentMD5, etag)
		return "", 0, utils.NewS3Error(400, "BadDigest", "The Content-MD5 you specified did not match what we received.")
	}
	log.Printf("Content-MD5 verified for part %d of upload %s: %s", partNumber, uploadID, etag)
	return etag, written, nil
}

// UploadPartCopy copies (a byte range of) an existing object into a part of
// an in-progress multipart upload, returning the part's ETag and size.
//
// The source is read through s.GetObject rather than the raw filesystem: for
// append-storage objects the on-disk file is only an empty placeholder, so
// reading s.fs directly (as the original did) would copy zero bytes.
func (s *s3Service) UploadPartCopy(ctx context.Context, uploadID string, partNumber int, srcBucket, srcKey string, start, end *int64) (string, int64, error) {
	f, _, err := s.GetObject(ctx, srcBucket, srcKey)
	if err != nil {
		return "", 0, err
	}
	defer f.Close()

	var r io.Reader = f
	if start != nil {
		if _, err := f.Seek(*start, io.SeekStart); err != nil {
			return "", 0, err
		}
		// S3 byte ranges are inclusive: start-end covers end-start+1 bytes.
		// With no (valid) end, copy from start to EOF.
		var n int64
		if end != nil && *end >= *start {
			n = (*end - *start) + 1
		}
		if n > 0 {
			r = io.LimitReader(f, n)
		}
	}
	return s.mpu.UploadPart(uploadID, partNumber, r)
}

// CompleteMultipart stitches the uploaded parts together into the final
// object file under the bucket directory, records its metadata, and returns
// the resulting ETag.
func (s *s3Service) CompleteMultipart(ctx context.Context, bucket, key, uploadID string, parts []multipart.CompletePart) (string, error) {
	target := filepath.Join(s.root, bucket, key)
	if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
		return "", err
	}

	etag, totalSize, err := s.mpu.CompleteTo(uploadID, target, parts)
	if err != nil {
		return "", err
	}

	// Record filesystem-backed metadata: no segments, so GetObject reads the
	// concatenated file straight from disk.
	objMeta := storage.NewDistributedObjectMeta()
	objMeta.Size = totalSize
	objMeta.ETag = etag
	objMeta.LastModified = time.Now().Unix()
	objMeta.ContentType = "application/octet-stream"
	objMeta.UserMeta = map[string]string{}
	if err := s.meta.PutMeta(bucket, key, objMeta); err != nil {
		return "", err
	}
	return etag, nil
}

// AbortMultipart aborts an in-progress multipart upload, delegating cleanup
// of its staged parts to the multipart manager.
func (s *s3Service) AbortMultipart(ctx context.Context, uploadID string) error {
	return s.mpu.Abort(uploadID)
}

// ListParts returns a page of uploaded parts for uploadID, starting after
// partNumberMarker and limited to maxParts. The int result is presumably the
// next part-number marker and the bool whether the listing is truncated —
// confirm against multipart.Manager.ListParts.
func (s *s3Service) ListParts(ctx context.Context, uploadID string, partNumberMarker, maxParts int) ([]multipart.PartInfo, int, bool, error) {
	return s.mpu.ListParts(uploadID, partNumberMarker, maxParts)
}

// ListMultipartUploads lists in-progress multipart uploads in bucket matching
// prefix, up to maxUploads. The bool result presumably indicates truncation —
// confirm against multipart.Manager.ListUploads.
func (s *s3Service) ListMultipartUploads(ctx context.Context, bucket, prefix string, maxUploads int) ([]multipart.UploadInfo, bool, error) {
	return s.mpu.ListUploads(bucket, prefix, maxUploads)
}

// Close releases resources owned by the service (currently only the meta
// store). Safe to call on a nil receiver or a partially initialized service.
func (s *s3Service) Close() error {
	if s == nil || s.meta == nil {
		return nil
	}
	return s.meta.Close()
}

// ResetS3ForTest resets the singleton for tests. Not intended for production use.
// Any existing instance is closed (best effort) before being discarded.
func ResetS3ForTest() {
	if impl := s3Impl; impl != nil {
		_ = impl.Close()
	}
	s3Impl = nil
}

// BucketInfo describes a bucket entry.
type BucketInfo struct {
	Name         string    // bucket name (directory name under root)
	CreationDate time.Time // directory mtime, used as the creation timestamp
}

// ObjectInfo describes an object entry.
type ObjectInfo struct {
	Key          string    // object key, slash-separated, relative to the bucket
	Size         int64     // object size in bytes
	ETag         string    // hex MD5 of the object body
	LastModified time.Time // last-modified time from the object metadata
}

// ListObjectsV2Result is a simplified result for V2 listing.
type ListObjectsV2Result struct {
	Name                  string       // bucket name
	Prefix                string       // requested key prefix filter
	Delimiter             string       // requested delimiter (usually "/")
	MaxKeys               int          // effective page size
	KeyCount              int          // number of entries in Contents
	IsTruncated           bool         // true when more keys remain after this page
	ContinuationToken     string       // token echoed from the request
	NextContinuationToken string       // token for the next page when truncated
	Contents              []ObjectInfo // object entries in this page
	CommonPrefixes        []string     // rolled-up prefixes when a delimiter is set
}

// ListBuckets returns buckets under root (directories only). Internal
// bookkeeping directories are excluded: anything dot-prefixed is skipped,
// which covers both ".meta" and ".uploads" — the original only excluded
// ".meta", so the multipart staging directory leaked into bucket listings.
func (s *s3Service) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
	entries, err := os.ReadDir(s.root)
	if err != nil {
		return nil, err
	}
	var out []BucketInfo
	for _, e := range entries {
		if !e.IsDir() || strings.HasPrefix(e.Name(), ".") {
			continue
		}
		info, err := e.Info()
		if err != nil {
			// Entry vanished or became unreadable mid-listing; skip it.
			continue
		}
		out = append(out, BucketInfo{Name: e.Name(), CreationDate: info.ModTime()})
	}
	return out, nil
}

// HeadBucket reports whether the bucket exists as a directory under root,
// returning utils.ErrNoSuchBucket when it does not.
func (s *s3Service) HeadBucket(ctx context.Context, bucket string) error {
	info, err := os.Stat(filepath.Join(s.root, bucket))
	if err != nil || !info.IsDir() {
		return utils.ErrNoSuchBucket
	}
	return nil
}

// DeleteBucket removes an empty bucket along with its metadata and
// configuration. A bucket containing anything other than our internal
// bookkeeping entries is rejected with ErrBucketNotEmpty.
func (s *s3Service) DeleteBucket(ctx context.Context, bucket string) error {
	dir := filepath.Join(s.root, bucket)

	// The bucket must exist as a directory.
	info, err := os.Stat(dir)
	if err != nil || !info.IsDir() {
		return utils.ErrNoSuchBucket
	}

	// Refuse deletion if any non-internal entry is present.
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		switch entry.Name() {
		case ".", "..", ".meta", ".uploads":
			// internal markers — ignore
		default:
			return utils.ErrBucketNotEmpty
		}
	}

	// Remove the directory, then its metadata and configuration.
	if err := os.RemoveAll(dir); err != nil {
		return err
	}
	if err := s.meta.DeleteAllMetaInBucket(bucket); err != nil {
		return err
	}
	return s.meta.DeleteBucketConfig(bucket)
}

// GetBucketLocation reports the bucket's region, falling back to the
// service-wide default when none has been configured.
func (s *s3Service) GetBucketLocation(ctx context.Context, bucket string) (string, error) {
	if err := s.HeadBucket(ctx, bucket); err != nil {
		return "", err
	}
	conf, err := s.meta.GetBucketConfig(bucket)
	if err != nil {
		return "", err
	}
	region := conf.Region
	if region == "" {
		region = consts.DefaultRegion
	}
	return region, nil
}

// GetBucketVersioning reads the bucket's stored versioning status; an empty
// string means versioning was never configured.
func (s *s3Service) GetBucketVersioning(ctx context.Context, bucket string) (string, error) {
	if err := s.HeadBucket(ctx, bucket); err != nil {
		return "", err
	}
	conf, err := s.meta.GetBucketConfig(bucket)
	if err != nil {
		return "", err
	}
	return conf.Versioning, nil
}

// PutBucketVersioning records the bucket versioning status ("Enabled" or
// "Suspended") in the bucket's configuration.
func (s *s3Service) PutBucketVersioning(ctx context.Context, bucket, status string) error {
	if err := s.HeadBucket(ctx, bucket); err != nil {
		return err
	}
	conf, err := s.meta.GetBucketConfig(bucket)
	if err != nil {
		return err
	}
	conf.Versioning = status
	return s.meta.PutBucketConfig(bucket, conf)
}

// GetBucketPolicy returns the bucket policy as raw JSON. A bucket with no
// stored policy yields utils.ErrNoSuchBucketPolicy.
func (s *s3Service) GetBucketPolicy(ctx context.Context, bucket string) (string, error) {
	if err := s.HeadBucket(ctx, bucket); err != nil {
		return "", err
	}
	conf, err := s.meta.GetBucketConfig(bucket)
	if err != nil {
		return "", err
	}
	if policy := conf.Policy; policy != "" {
		return policy, nil
	}
	return "", utils.ErrNoSuchBucketPolicy
}

// PutBucketPolicy stores the raw policy JSON string in the bucket's
// configuration. The policy content is not validated here.
func (s *s3Service) PutBucketPolicy(ctx context.Context, bucket, policy string) error {
	if err := s.HeadBucket(ctx, bucket); err != nil {
		return err
	}
	conf, err := s.meta.GetBucketConfig(bucket)
	if err != nil {
		return err
	}
	conf.Policy = policy
	return s.meta.PutBucketConfig(bucket, conf)
}

// DeleteBucketPolicy clears the stored policy by writing an empty string
// back to the bucket's configuration.
func (s *s3Service) DeleteBucketPolicy(ctx context.Context, bucket string) error {
	if err := s.HeadBucket(ctx, bucket); err != nil {
		return err
	}
	conf, err := s.meta.GetBucketConfig(bucket)
	if err != nil {
		return err
	}
	conf.Policy = ""
	return s.meta.PutBucketConfig(bucket, conf)
}

// GetBucketACL returns the bucket's stored canned ACL, defaulting to
// "private" when none has been set.
func (s *s3Service) GetBucketACL(ctx context.Context, bucket string) (string, error) {
	if err := s.HeadBucket(ctx, bucket); err != nil {
		return "", err
	}
	conf, err := s.meta.GetBucketConfig(bucket)
	if err != nil {
		return "", err
	}
	acl := conf.ACL
	if acl == "" {
		acl = "private"
	}
	return acl, nil
}

// PutBucketACL stores a canned ACL string (e.g. "private", "public-read")
// in the bucket's configuration. The value is not validated here.
func (s *s3Service) PutBucketACL(ctx context.Context, bucket, acl string) error {
	if err := s.HeadBucket(ctx, bucket); err != nil {
		return err
	}
	conf, err := s.meta.GetBucketConfig(bucket)
	if err != nil {
		return err
	}
	conf.ACL = acl
	return s.meta.PutBucketConfig(bucket, conf)
}

// HeadObject returns an object's metadata without touching its data.
// The bucket directory must exist; otherwise ErrNoSuchBucket is returned
// before the meta store is consulted.
func (s *s3Service) HeadObject(ctx context.Context, bucket, key string) (*storage.DistributedObjectMeta, error) {
	if _, err := os.Stat(filepath.Join(s.root, bucket)); os.IsNotExist(err) {
		return nil, utils.ErrNoSuchBucket
	}
	// GetMeta reports ErrNoSuchKey (or another meta-store error) directly.
	return s.meta.GetMeta(bucket, key)
}

// ListObjectsV2 lists objects with optional prefix and delimiter.
//
// Keys are discovered by walking the bucket directory (PutObject maintains a
// placeholder file per append-storage object), filtered by prefix/startAfter/
// continuation token, sorted, then either rolled up into CommonPrefixes (when
// a delimiter is given) or emitted as Contents, up to maxKeys object entries.
// The continuation token is the hex-encoded last key returned.
func (s *s3Service) ListObjectsV2(ctx context.Context, bucket, prefix, delimiter, startAfter, continuationToken string, maxKeys int) (*ListObjectsV2Result, error) {
	if maxKeys <= 0 {
		maxKeys = 1000
	}
	// Validate bucket.
	bucketPath := filepath.Join(s.root, bucket)
	if stat, err := os.Stat(bucketPath); err != nil || !stat.IsDir() {
		return nil, utils.ErrNoSuchBucket
	}

	// Decode the continuation token (hex-encoded key); malformed tokens are
	// ignored, matching the original behavior.
	var contFrom string
	if continuationToken != "" {
		if b, err := hex.DecodeString(continuationToken); err == nil {
			contFrom = string(b)
		}
	}

	// Walk bucket directory to collect candidate keys.
	var keys []string
	err := filepath.WalkDir(bucketPath, func(path string, d os.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		rel, err := filepath.Rel(bucketPath, path)
		if err != nil {
			return nil
		}
		rel = filepath.ToSlash(rel)
		if startAfter != "" && rel <= startAfter {
			return nil
		}
		if contFrom != "" && rel <= contFrom {
			return nil
		}
		if strings.HasPrefix(rel, prefix) {
			keys = append(keys, rel)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	sort.Strings(keys)

	contents := make([]ObjectInfo, 0, len(keys))
	commonPrefixSet := make(map[string]struct{})

	// consumed counts keys fully handled (emitted, rolled into a common
	// prefix, or skipped for missing meta). Truncation is reported only when
	// unprocessed keys remain. The original compared len(keys) against
	// len(contents), which falsely reported truncation whenever a delimiter
	// rolled keys into CommonPrefixes or metadata was missing.
	consumed := 0
	for _, k := range keys {
		consumed++
		if delimiter != "" {
			// Compute common prefixes relative to the requested prefix.
			rest := strings.TrimPrefix(k, prefix)
			if idx := strings.Index(rest, delimiter); idx >= 0 {
				commonPrefixSet[prefix+rest[:idx+len(delimiter)]] = struct{}{}
				continue
			}
		}
		// Regular object: placeholder files without metadata are skipped.
		meta, err := s.meta.GetMeta(bucket, k)
		if err != nil {
			continue
		}
		contents = append(contents, ObjectInfo{Key: k, Size: meta.Size, ETag: meta.ETag, LastModified: time.Unix(meta.LastModified, 0)})
		if len(contents) >= maxKeys {
			break
		}
	}

	// Build the sorted common-prefix list.
	commonPrefixes := make([]string, 0, len(commonPrefixSet))
	for cp := range commonPrefixSet {
		commonPrefixes = append(commonPrefixes, cp)
	}
	sort.Strings(commonPrefixes)

	isTruncated := consumed < len(keys)
	var nextToken string
	if isTruncated && len(contents) > 0 {
		nextToken = hex.EncodeToString([]byte(contents[len(contents)-1].Key))
	}

	return &ListObjectsV2Result{
		Name:                  bucket,
		Prefix:                prefix,
		Delimiter:             delimiter,
		MaxKeys:               maxKeys,
		KeyCount:              len(contents),
		IsTruncated:           isTruncated,
		ContinuationToken:     continuationToken,
		NextContinuationToken: nextToken,
		Contents:              contents,
		CommonPrefixes:        commonPrefixes,
	}, nil
}

// GetBucketLifecycleConfiguration returns the bucket's stored lifecycle
// document; a bucket with none yields utils.ErrNoSuchLifecycleConfiguration.
func (s *s3Service) GetBucketLifecycleConfiguration(ctx context.Context, bucket string) (string, error) {
	if err := s.HeadBucket(ctx, bucket); err != nil {
		return "", err
	}
	conf, err := s.meta.GetBucketConfig(bucket)
	if err != nil {
		return "", err
	}
	if lc := conf.Lifecycle; lc != "" {
		return lc, nil
	}
	return "", utils.ErrNoSuchLifecycleConfiguration
}

// PutBucketLifecycleConfiguration stores the raw lifecycle document in the
// bucket's configuration. The content is not validated here.
func (s *s3Service) PutBucketLifecycleConfiguration(ctx context.Context, bucket, lifecycle string) error {
	if err := s.HeadBucket(ctx, bucket); err != nil {
		return err
	}
	conf, err := s.meta.GetBucketConfig(bucket)
	if err != nil {
		return err
	}
	conf.Lifecycle = lifecycle
	return s.meta.PutBucketConfig(bucket, conf)
}

// DeleteBucketLifecycleConfiguration clears the stored lifecycle document by
// writing an empty string back to the bucket's configuration.
func (s *s3Service) DeleteBucketLifecycleConfiguration(ctx context.Context, bucket string) error {
	if err := s.HeadBucket(ctx, bucket); err != nil {
		return err
	}
	conf, err := s.meta.GetBucketConfig(bucket)
	if err != nil {
		return err
	}
	conf.Lifecycle = ""
	return s.meta.PutBucketConfig(bucket, conf)
}
