// handlers/upload_handlers.go
package handlers

import (
	"crypto/md5"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"log"
	"mime/multipart"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/google/uuid"

	"file-manager/models"
	"file-manager/storage"
	"file-manager/utils"
)

// UploadHandlers groups the HTTP handlers for chunked, drag-and-drop and
// instant ("秒传") uploads. The struct itself is stateless; shared state
// lives in package-level variables declared elsewhere in this package
// (uploadDir, bufferPool, chunkSize, sessionLocks, uploadSem).
type UploadHandlers struct{}

// Upload_Handlers is the shared handler instance the router registers.
// NOTE(review): the underscore violates Go naming conventions (MixedCaps);
// renaming would break external references — confirm before changing.
var Upload_Handlers = &UploadHandlers{}

// UploadChunk handles POST /api/upload/chunk (分片上传)
func (h *UploadHandlers) UploadChunk(c *gin.Context) {
	err := c.Request.ParseMultipartForm(32 << 20)
	if err != nil {
		log.Printf("ParseMultipartForm error: %v", err)
		c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse form data"})
		return
	}

	fileID := c.PostForm("fileId")
	chunkIndexStr := c.PostForm("chunkIndex")
	totalChunksStr := c.PostForm("totalChunks")

	if fileID == "" || chunkIndexStr == "" || totalChunksStr == "" {
		log.Printf("Missing fields from %s: fileId=%s, chunkIndex=%s, totalChunks=%s", c.ClientIP(), fileID, chunkIndexStr, totalChunksStr)
		c.JSON(http.StatusBadRequest, gin.H{"error": "Missing required fields"})
		return
	}

	log.Printf("Received chunk from %s: fileId=%s, chunkIndex=%s, totalChunks=%s", c.ClientIP(), fileID, chunkIndexStr, totalChunksStr)

	chunkIndex, err := strconv.Atoi(chunkIndexStr)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid chunk index"})
		return
	}

	totalChunks, err := strconv.Atoi(totalChunksStr)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid total chunks"})
		return
	}

	storage.UpdateUploadStatus(fileID, totalChunks, chunkIndex+1)

	chunkFilename := fmt.Sprintf("%s.chunk.%d", fileID, chunkIndex)
	chunkPath := filepath.Join(uploadDir, chunkFilename)

	fileHeader, _, err := c.Request.FormFile("chunk")
	if err != nil {
		log.Printf("FormFile error: %v", err)
		c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to get chunk file"})
		return
	}
	defer fileHeader.Close()

	out, err := os.Create(chunkPath)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create chunk file"})
		return
	}
	defer out.Close()

	// 使用 bufferPool 优化分片写入
	bufPtr := bufferPool.Get().(*[]byte)
	defer bufferPool.Put(bufPtr)
	_, err = io.CopyBuffer(out, fileHeader, *bufPtr)
	if err != nil {
		if errors.Is(err, net.ErrClosed) || strings.Contains(err.Error(), "connection reset by peer") {
			log.Printf("Upload interrupted by client: %v", err)
			return
		}
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to save chunk"})
		return
	}

	log.Printf("Chunk saved: %s", chunkPath)
	c.JSON(http.StatusOK, gin.H{
		"status":     "success",
		"fileId":     fileID,
		"chunkIndex": chunkIndex,
	})
}

// CompleteUpload handles POST /api/upload/complete (完成分片上传)
func (h *UploadHandlers) CompleteUpload(c *gin.Context) {
	var req struct {
		FileID   string `json:"fileId"`
		FileName string `json:"fileName"`
		FileSize int64  `json:"fileSize"`
		Parent   string `json:"parent"`
	}

	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request body: " + err.Error()})
		return
	}

	if req.Parent == "" {
		req.Parent = "root"
	}

	// 🔐 每个 fileId 使用独立锁，避免并发写冲突
	mu, _ := sessionLocks.LoadOrStore(req.FileID, &sync.Mutex{})
	mu.(*sync.Mutex).Lock()
	defer mu.(*sync.Mutex).Unlock()

	totalChunks := (req.FileSize + int64(chunkSize) - 1) / int64(chunkSize)
	baseChunkPath := filepath.Join(uploadDir, req.FileID+".chunk.")

	// 1. 检查所有分片是否存在
	for i := 0; i < int(totalChunks); i++ {
		chunkPath := baseChunkPath + strconv.Itoa(i)
		if _, err := os.Stat(chunkPath); os.IsNotExist(err) {
			log.Printf("Missing chunk: %s", chunkPath)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("Chunk %d is missing", i)})
			return
		}
	}

	// 2. 创建临时合并文件
	finalPath := filepath.Join(uploadDir, req.FileName)
	tempPath := finalPath + ".tmp"

	finalFile, err := os.Create(tempPath)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create temporary file"})
		return
	}

	// 初始化 MD5
	hash := md5.New()
	// 使用 bufferPool 优化合并和MD5计算
	bufPtr := bufferPool.Get().(*[]byte)
	defer bufferPool.Put(bufPtr)
	multiWriter := io.MultiWriter(finalFile, hash)

	// 3. 合并所有分片
	for i := 0; i < int(totalChunks); i++ {
		chunkPath := baseChunkPath + strconv.Itoa(i)

		chunkFile, err := os.Open(chunkPath)
		if err != nil {
			log.Printf("Failed to open chunk: %s, error: %v", chunkPath, err)
			finalFile.Close()
			os.Remove(tempPath)
			c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to open chunk for merging"})
			return
		}

		_, err = io.CopyBuffer(multiWriter, chunkFile, *bufPtr)
		chunkFile.Close() // ✅ 立即关闭
		if err != nil {
			log.Printf("Failed to copy chunk %d: %v", i, err)
			finalFile.Close()
			os.Remove(tempPath)
			c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to write chunk data during merge"})
			return
		}

		// ✅ 删除前检查是否存在
		if _, err := os.Stat(chunkPath); err == nil {
			if delErr := os.Remove(chunkPath); delErr != nil {
				log.Printf("Warning: failed to remove chunk %s: %v", chunkPath, delErr)
			}
		}
	}

	// 4. 刷盘并关闭
	if err := finalFile.Sync(); err != nil {
		log.Printf("Failed to sync file to disk: %v", err)
		finalFile.Close()
		os.Remove(tempPath)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to flush data to disk"})
		return
	}
	if err := finalFile.Close(); err != nil {
		os.Remove(tempPath)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to close temporary file"})
		return
	}

	// 5. 重命名（支持重试）
	const maxRetries = 3
	var renameErr error
	for i := 0; i < maxRetries; i++ {
		_ = os.Remove(finalPath) // 删除旧文件
		renameErr = os.Rename(tempPath, finalPath)
		if renameErr == nil {
			break
		}
		if utils.IsFileInUse(renameErr) {
			time.Sleep(100 * time.Millisecond)
			continue
		} else {
			break
		}
	}

	if renameErr != nil {
		log.Printf("Failed to rename file: %v", renameErr)
		os.Remove(tempPath)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to finalize file"})
		return
	}

	info, err := os.Stat(finalPath)
	if err != nil || info.Size() != req.FileSize {
		log.Printf("File size mismatch: expected %d, got %d", req.FileSize, info.Size())
		c.JSON(http.StatusInternalServerError, gin.H{"error": "File size mismatch after merge"})
		return
	}

	md5Sum := hex.EncodeToString(hash.Sum(nil))

	newFile := &models.File{
		ID:        uuid.New().String(),
		Name:      req.FileName,
		Size:      req.FileSize,
		Type:      "file",
		Parent:    req.Parent,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
		MD5:       md5Sum,
		Path:      finalPath,
	}

	storage.AddFile(newFile)

	go storage.RemoveUploadStatus(req.FileID)

	log.Printf("File merged and saved: %s (MD5: %s)", req.FileName, md5Sum)
	c.JSON(http.StatusOK, gin.H{"file": newFile})
}

// UploadStatus handles GET /api/upload/status/:fileId
func (h *UploadHandlers) UploadStatus(c *gin.Context) {
	fileID := c.Param("fileId")
	status := storage.GetUploadStatus(fileID)
	if status == nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "Upload not found"})
		return
	}
	c.JSON(http.StatusOK, status)
}

// VerifyUpload handles GET /api/upload/verify (秒传验证)
func (h *UploadHandlers) VerifyUpload(c *gin.Context) {
	md5Sum := c.Query("md5")
	fileSizeStr := c.Query("size")

	if fileSizeStr == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Missing file size"})
		return
	}

	fileSize, err := strconv.ParseInt(fileSizeStr, 10, 64)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid file size"})
		return
	}

	if md5Sum == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Missing MD5 checksum"})
		return
	}

	// We need to iterate over all files to check for match
	for _, file := range storage.ListFilesByParent("root") {
		if file.MD5 == md5Sum && file.Size == fileSize {
			log.Printf("File hit on秒传: MD5=%s, Size=%d, FileName=%s", md5Sum, fileSize, file.Name)
			c.JSON(http.StatusOK, gin.H{
				"exists": true,
				"file":   file,
			})
			return
		}
	}

	c.JSON(http.StatusOK, gin.H{
		"exists": false,
	})
}

// DragUpload handles POST /api/upload/drag (拖拽上传)
func (h *UploadHandlers) DragUpload(c *gin.Context) {
	reader, err := c.Request.MultipartReader()
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "无法读取 multipart 数据"})
		return
	}

	var results []gin.H
	var mu sync.Mutex
	var wg sync.WaitGroup

	for {
		part, err := reader.NextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			mu.Lock()
			results = append(results, gin.H{"error": "读取分部失败", "detail": err.Error()})
			mu.Unlock()
			continue
		}

		if part.FileName() == "" {
			part.Close()
			continue
		}

		filename := part.FileName()
		parent := c.DefaultQuery("parent", "root")

		if err := uploadSem.Acquire(c, 1); err != nil {
			mu.Lock()
			results = append(results, gin.H{"error": "上传被中断", "file": filename})
			mu.Unlock()
			part.Close()
			continue
		}

		wg.Add(1)
		go func(p *multipart.Part, name, dir string) {
			defer wg.Done()
			defer uploadSem.Release(1)
			defer p.Close()

			result := h.processUploadedFileFromReader(p, name, dir)

			mu.Lock()
			results = append(results, result)
			mu.Unlock()
		}(part, filename, parent)
	}

	wg.Wait()

	c.JSON(http.StatusOK, gin.H{"results": results})
}

// processUploadedFileFromReader 从 io.Reader 流式处理上传文件
func (h *UploadHandlers) processUploadedFileFromReader(reader io.Reader, filename, parent string) gin.H {
	if parent == "" {
		parent = "root"
	}

	safeName := utils.SanitizeFilename(filename)
	if safeName == "" {
		return gin.H{"error": "无效的文件名"}
	}

	fileID := uuid.New().String()
	dstPath := filepath.Join(uploadDir, safeName)

	dstFile, err := os.Create(dstPath)
	if err != nil {
		return gin.H{"error": "无法创建目标文件", "file": filename}
	}
	defer dstFile.Close()

	// 使用 bufferPool 优化拖拽上传
	bufPtr := bufferPool.Get().(*[]byte)
	defer bufferPool.Put(bufPtr)
	hash := md5.New()
	multiWriter := io.MultiWriter(dstFile, hash)

	copiedBytes, err := io.CopyBuffer(multiWriter, reader, *bufPtr)
	if err != nil {
		dstFile.Close()
		os.Remove(dstPath)
		return gin.H{"error": "保存文件失败", "file": filename}
	}

	fileSize := copiedBytes
	md5Sum := hex.EncodeToString(hash.Sum(nil))

	newFile := &models.File{
		ID:        fileID,
		Name:      safeName,
		Size:      fileSize,
		Type:      "file",
		Parent:    parent,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
		MD5:       md5Sum,
		Path:      dstPath,
	}

	storage.AddFile(newFile)

	return gin.H{
		"success": true,
		"file":    newFile,
	}
}

// CleanupOldUploads periodically cleans up old upload statuses and chunks
func (h *UploadHandlers) CleanupOldUploads() {
	ticker := time.NewTicker(30 * time.Minute)
	defer ticker.Stop()

	for range ticker.C {
		var chunksToDelete []string
		var uploadIDsToDelete []string

		now := time.Now()
		for fileID, status := range storage.GetAllUploadStatuses() {
			if now.Sub(status.LastModified) > 24*time.Hour {
				for i := 0; i < status.TotalChunks; i++ {
					chunksToDelete = append(chunksToDelete, filepath.Join(uploadDir, fmt.Sprintf("%s.chunk.%d", fileID, i)))
				}
				uploadIDsToDelete = append(uploadIDsToDelete, fileID)
			}
		}

		for _, chunkPath := range chunksToDelete {
			_ = os.Remove(chunkPath)
		}

		for _, fileID := range uploadIDsToDelete {
			storage.RemoveUploadStatus(fileID)
		}
	}
}
