package service

import (
	"bytes"
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"go-pan/internal/model"
	"go-pan/internal/repository"
	"go-pan/internal/storage/provider"
	utils2 "go-pan/internal/utils"
	"io"
	"log"
	"path/filepath"
	"sort"
	"strconv"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/google/uuid"
)

// UploadService defines the chunked-upload workflow: initialise an upload
// task, receive individual chunks, finalise the upload, and query progress.
type UploadService interface {
	// InitUpload validates the file metadata and creates a new upload task.
	InitUpload(ctx *gin.Context, fileName string, fileSize int64, mimeType string, ownerID int64) (*model.UploadTask, error)
	// UploadChunk stores one chunk of an existing upload task.
	UploadChunk(ctx *gin.Context, uploadID int64, chunkIndex int, size int64, reader io.Reader) error
	// CompleteUpload verifies all chunks are present and creates the final file metadata.
	CompleteUpload(ctx *gin.Context, uploadID int64, clientHash string) error
	// GetUploadStatus returns the current state of an upload task.
	GetUploadStatus(ctx *gin.Context, uploadID int64) (*model.UploadTask, error)
}

// UploadServiceImpl is the default UploadService, backed by repository
// interfaces for persistence and a StorageProvider for chunk storage.
type UploadServiceImpl struct {
	taskRepo        repository.UploadTaskRepository  // upload task lifecycle records
	chunkRepo       repository.UploadChunkRepository // per-chunk upload records
	fileMetaRepo    repository.FileMetaRepository    // final file metadata rows
	fileChunkRepo   repository.FileChunkRepository   // file-to-chunk association rows
	storageProvider provider.StorageProvider         // per-user chunk storage backend
}

// NewUploadService constructs the default UploadService implementation with
// its repository and storage dependencies.
//
// Note: the storage parameter is named storageProvider rather than "provider"
// so it no longer shadows the imported provider package inside the body.
func NewUploadService(taskRepo repository.UploadTaskRepository, chunkRepo repository.UploadChunkRepository,
	fileMetaRepo repository.FileMetaRepository, fileChunkRepo repository.FileChunkRepository, storageProvider provider.StorageProvider) UploadService {
	return &UploadServiceImpl{
		taskRepo:        taskRepo,
		chunkRepo:       chunkRepo,
		fileMetaRepo:    fileMetaRepo,
		fileChunkRepo:   fileChunkRepo,
		storageProvider: storageProvider,
	}
}

// InitUpload validates the request metadata, derives chunking parameters and
// persists a fresh upload task that the client then drives via UploadChunk.
//
// Returns the created task (whose UploadID is the client-facing business
// identifier) or an error when the metadata is invalid or persistence fails.
func (s *UploadServiceImpl) InitUpload(ctx *gin.Context, fileName string, fileSize int64, mimeType string, ownerID int64) (*model.UploadTask, error) {
	if fileName == "" || fileSize <= 0 {
		return nil, errors.New("invalid file metadata")
	}

	// Pick a chunk-size tier for this file and count the chunks needed
	// (ceiling division so a partial trailing chunk is included).
	chunkSize := calculateChunkSize(fileSize)
	numChunks := int((fileSize + int64(chunkSize) - 1) / int64(chunkSize))

	// The upload ID is a distributed ID used by the frontend for all
	// subsequent chunk/complete/status calls.
	uploadID := utils2.GenDistributeId()

	now := time.Now()
	task := &model.UploadTask{
		UploadID:       model.UploadIDString(uploadID),
		FileName:       fileName,
		FileSize:       fileSize,
		MimeType:       mimeType,
		ChunkSize:      chunkSize,
		TotalChunks:    numChunks,
		UploadedChunks: 0,
		IsComplete:     false,
		OwnerID:        &ownerID,
		CreatedAt:      now,
		UpdatedAt:      now,
	}

	// Persist using the standard context carried by the HTTP request.
	if err := s.taskRepo.Create(ctx.Request.Context(), task); err != nil {
		log.Printf("Failed to create upload task for file %s: %v", fileName, err)
		return nil, fmt.Errorf("failed to create upload task: %w", err)
	}

	log.Printf("Created upload task: upload_id=%d, file_name=%s, owner_id=%d", uploadID, fileName, ownerID)
	return task, nil
}

// UploadChunk persists a single chunk of an upload task: it verifies the
// task exists, skips duplicates idempotently, writes the chunk payload to
// the caller's storage backend, records the chunk row, and advances the
// task's streaming MD5 state.
//
// NOTE(review): the streaming MD5 (task.MD5State) is only correct when
// chunks are appended in ascending index order by a single writer;
// out-of-order or concurrent uploads of the same task would corrupt it —
// confirm the client uploads sequentially.
func (s *UploadServiceImpl) UploadChunk(ctx *gin.Context, uploadID int64, chunkIndex int, size int64, reader io.Reader) error {
	// Fetch the task once up front: it both guards against FK violations when
	// inserting the chunk and supplies the streaming-hash state updated below.
	// (The original code loaded the same task twice.)
	task, err := s.taskRepo.FindByUploadID(ctx.Request.Context(), uploadID)
	if err != nil {
		return fmt.Errorf("upload task not found: %w", err)
	}

	exists, err := s.chunkRepo.Exists(ctx.Request.Context(), uploadID, chunkIndex)
	if err != nil {
		return fmt.Errorf("check chunk exists failed: %w", err)
	}
	if exists {
		// Idempotent: a re-sent chunk is silently skipped.
		return nil
	}

	// Resolve the caller's storage backend. Reject unauthenticated requests
	// explicitly instead of dereferencing a nil user — the original discarded
	// the ok flag from GetUserInfo; this mirrors CompleteUpload's guard.
	userInfo, ok := utils2.GetUserInfo(ctx)
	if !ok || userInfo == nil || userInfo.ID == 0 {
		return fmt.Errorf("unauthorized: user info not found")
	}
	storage, err := s.storageProvider.GetUserStorage(ctx.Request.Context(), fmt.Sprintf("%d", userInfo.ID))
	if err != nil {
		return fmt.Errorf("get storage failed: %w", err)
	}

	// Buffer the chunk payload so it can be hashed and uploaded from the same
	// bytes.
	chunkBytes, err := io.ReadAll(reader)
	if err != nil {
		return fmt.Errorf("read chunk failed: %w", err)
	}

	// Per-chunk MD5, stored alongside the chunk for later verification.
	digest := md5.Sum(chunkBytes)
	chunkMD5 := hex.EncodeToString(digest[:])

	// Unique storage key for this chunk.
	fileName := fmt.Sprintf("%d_%d_%s.chunk", uploadID, chunkIndex, uuid.New().String())
	savePath := filepath.Join("uploads", fileName)

	if err := storage.Upload(ctx.Request.Context(), savePath, bytes.NewReader(chunkBytes), size, "application/octet-stream"); err != nil {
		return fmt.Errorf("upload chunk failed: %w", err)
	}

	// Record the chunk row; on failure, best-effort delete the orphaned
	// storage object so it does not leak.
	chunk := &model.UploadChunk{
		ID:         utils2.GenDistributeId(),
		UploadID:   uploadID,
		ChunkIndex: chunkIndex,
		ChunkSize:  int(size),
		ChunkMD5:   chunkMD5,
		StorageKey: savePath,
		CreatedAt:  time.Now(),
		UploadedAt: time.Now(),
	}
	if err := s.chunkRepo.CreateChunk(ctx.Request.Context(), chunk); err != nil {
		_ = storage.Delete(ctx.Request.Context(), savePath)
		return fmt.Errorf("save chunk meta failed: %w", err)
	}

	task.UploadedChunks++

	// Advance the streaming MD5: restore the serialized hasher state (if any),
	// append this chunk, then persist both the new state and the running digest.
	var streamingHasher *utils2.SerializableMD5
	if task.MD5State == "" {
		// First chunk: start a fresh serializable MD5 hasher.
		streamingHasher = utils2.NewSerializableMD5()
	} else {
		streamingHasher, err = utils2.RestoreMD5FromHex(task.MD5State)
		if err != nil {
			return fmt.Errorf("failed to restore MD5 state: %w", err)
		}
	}
	streamingHasher.Write(chunkBytes)

	md5State, err := streamingHasher.SerializeHex()
	if err != nil {
		return fmt.Errorf("failed to serialize MD5 state: %w", err)
	}
	task.MD5State = md5State
	task.StreamingHash = hex.EncodeToString(streamingHasher.Sum(nil))

	if err := s.taskRepo.Update(ctx.Request.Context(), task); err != nil {
		return fmt.Errorf("update task failed: %w", err)
	}

	return nil
}

// CompleteUpload finalises an upload task: it verifies the client-supplied
// streaming hash (MD5) when both sides have one, checks that every chunk has
// arrived, computes the file's SHA-256 digest by streaming the stored chunks,
// and performs a logical merge — a FileMetadata row with an empty storage key
// plus one FileChunk row per uploaded chunk. Chunk objects are not deleted;
// the file_chunk rows reference the same storage keys.
func (s *UploadServiceImpl) CompleteUpload(ctx *gin.Context, uploadID int64, clientHash string) error {
	task, err := s.taskRepo.FindByUploadID(ctx.Request.Context(), uploadID)
	if err != nil {
		return fmt.Errorf("find task failed: %w", err)
	}

	// Transfer-integrity check: compare the client's MD5 against the streaming
	// MD5 accumulated in UploadChunk. The file's persisted hash is SHA-256
	// (computed below); the MD5 only validates the transfer.
	//
	// The original code also kept a streamingHashMatch flag and later logged a
	// "streaming hash mismatch" warning — but a real mismatch already returns
	// an error here, so that warning could only fire when the server simply
	// had no streaming hash to compare. Log that case accurately instead.
	if clientHash != "" && task.StreamingHash != "" {
		if clientHash != task.StreamingHash {
			return fmt.Errorf("hash mismatch: client=%s, server=%s", clientHash, task.StreamingHash)
		}
		log.Printf("Streaming hash (MD5) verification passed: %s", clientHash)
	} else if clientHash != "" {
		log.Printf("Warning: client hash provided but no server streaming hash recorded for upload %d; skipping verification", uploadID)
	}

	// All chunks must be present before merging.
	uploadChunks, err := s.chunkRepo.GetChunksByUploadID(ctx.Request.Context(), uploadID)
	if err != nil {
		return fmt.Errorf("get chunks failed: %w", err)
	}
	if len(uploadChunks) != task.TotalChunks {
		return fmt.Errorf("incomplete upload: expected %d chunks, got %d", task.TotalChunks, len(uploadChunks))
	}

	// Hash and merge in ascending chunk order.
	sort.Slice(uploadChunks, func(i, j int) bool {
		return uploadChunks[i].ChunkIndex < uploadChunks[j].ChunkIndex
	})

	userInfo, ok := utils2.GetUserInfo(ctx)
	if !ok || userInfo == nil || userInfo.ID == 0 {
		return fmt.Errorf("unauthorized: user info not found")
	}

	storage, err := s.storageProvider.GetUserStorage(ctx.Request.Context(), fmt.Sprintf("%d", userInfo.ID))
	if err != nil {
		return fmt.Errorf("get storage failed: %w", err)
	}

	// Compute the file's SHA-256 by streaming every chunk back from storage.
	// Each reader is closed via defer inside the closure, so neither the
	// Download-error nor the Copy-error path can leak a reader (the original
	// closed it manually on two separate paths).
	hashSumHasher := sha256.New()
	hashChunk := func(index int, storageKey string) error {
		rc, err := storage.Download(ctx.Request.Context(), storageKey)
		if err != nil {
			return fmt.Errorf("failed to read chunk %d for SHA256: %w", index, err)
		}
		defer rc.Close()
		if _, err := io.Copy(hashSumHasher, rc); err != nil {
			return fmt.Errorf("failed to hash chunk %d for SHA256: %w", index, err)
		}
		return nil
	}
	for _, chunk := range uploadChunks {
		if err := hashChunk(chunk.ChunkIndex, chunk.StorageKey); err != nil {
			return err
		}
	}
	hashSum := hex.EncodeToString(hashSumHasher.Sum(nil))

	// Destination directory: optional parent_id query parameter, root (0) by
	// default. Malformed values silently fall back to root, as before.
	var parentID int64
	if pidStr := ctx.Query("parent_id"); pidStr != "" {
		if v, err := strconv.ParseInt(pidStr, 10, 64); err == nil {
			parentID = v
		}
	}

	// Logical merge: the file row carries no storage key of its own; its
	// content is the ordered set of file_chunk rows created below.
	nameKey := fmt.Sprintf("%d/%d/%s", userInfo.ID, parentID, task.FileName)
	meta := &model.FileMetadata{
		OwnerID:       userInfo.ID,
		ParentID:      parentID,
		Name:          task.FileName,
		NameUniqueKey: nameKey,
		IsDir:         false,
		Size:          task.FileSize,
		MimeType:      task.MimeType,
		StorageKey:    "", // content lives in file_chunk rows
		StorageEngine: "local",
		HashSHA256:    hashSum,
		IsDeleted:     false,
	}

	if err := s.fileMetaRepo.Create(ctx.Request.Context(), meta); err != nil {
		return fmt.Errorf("failed to create file metadata: %w", err)
	}

	log.Printf("File metadata created: id=%d, name=%s, size=%d", meta.ID, meta.Name, meta.Size)

	// Convert upload_chunk rows into file_chunk rows tied to the new file.
	fileChunks := make([]*model.FileChunk, 0, len(uploadChunks))
	for _, uc := range uploadChunks {
		fileChunks = append(fileChunks, &model.FileChunk{
			FileID:        meta.ID,
			ChunkIndex:    uc.ChunkIndex,
			ChunkSize:     uc.ChunkSize,
			ChunkMD5:      uc.ChunkMD5,
			StorageKey:    uc.StorageKey,
			StorageEngine: "local",
		})
	}

	if err := s.fileChunkRepo.BatchCreate(ctx.Request.Context(), fileChunks); err != nil {
		// Roll back the metadata row so we don't leave a file with no content.
		_ = s.fileMetaRepo.Delete(ctx.Request.Context(), meta.ID)
		return fmt.Errorf("failed to create file chunks: %w", err)
	}

	log.Printf("Created %d file chunks for file id=%d", len(fileChunks), meta.ID)

	// Mark the task complete; a failure here is non-fatal because the file
	// itself is already fully committed.
	task.IsComplete = true
	if err := s.taskRepo.Update(ctx.Request.Context(), task); err != nil {
		log.Printf("Warning: failed to update task status: %v", err)
	}

	// upload_chunk rows and temporary chunk objects are intentionally
	// retained: they have been converted into file_chunk references.
	log.Printf("File upload completed (logical merge): id=%d, name=%s, size=%d, chunks=%d", meta.ID, meta.Name, meta.Size, len(fileChunks))
	return nil
}

// GetUploadStatus looks up the current state of an upload task by its
// client-facing upload ID.
func (s *UploadServiceImpl) GetUploadStatus(ctx *gin.Context, uploadID int64) (*model.UploadTask, error) {
	task, err := s.taskRepo.FindByUploadID(ctx.Request.Context(), uploadID)
	return task, err
}

// calculateChunkSize picks the chunk size tier for a file of the given total
// size in bytes: 2 MB for files up to 20 MB, 5 MB up to 100 MB, 10 MB up to
// 500 MB, and 20 MB for anything larger.
func calculateChunkSize(fileSize int64) int {
	const mb = 1 << 20
	tiers := []struct {
		maxFile int64 // inclusive upper bound on file size for this tier
		chunk   int   // chunk size used within the tier
	}{
		{20 * mb, 2 * mb},
		{100 * mb, 5 * mb},
		{500 * mb, 10 * mb},
	}
	for _, t := range tiers {
		if fileSize <= t.maxFile {
			return t.chunk
		}
	}
	return 20 * mb
}
