package handler

import (
	"cloud-disk/global/db"
	"cloud-disk/global/define"
	"cloud-disk/logic"
	"cloud-disk/model/entity"
	"cloud-disk/model/request"
	"cloud-disk/model/response"
	"cloud-disk/mq"
	"cloud-disk/store"
	"cloud-disk/util"
	"encoding/json"
	"fmt"
	"github.com/gin-gonic/gin"
	"io"
	"log"
	"math"
	"net/http"
	"os"
	"path"
	"strconv"
	"time"
)

// DirUploadHandler 用户创建文件夹 返回给前端该文件夹的id
func DirUploadHandler(c *gin.Context) {
	identity := c.GetString("identity")
	req := request.FileUpload{}
	reqJson := c.Request.FormValue("req")
	json.Unmarshal([]byte(reqJson), &req)
	uf := entity.UserRepository{
		Identity:     util.UUID(),
		UserIdentity: identity,
		ParentId:     req.ParentId,
		Type:         "dir",
		Name:         req.Name,
	}
	id, err := logic.UserFileUploadLogic(uf)
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"msg": err.Error(),
		})
		return
	}
	c.JSON(http.StatusOK, id)
}

// PrepareUploadHandler 拿到文件的hash和filename,parentId, hash主要是为了验证一致性，filename是为了插入user_repository
func PrepareUploadHandler(c *gin.Context) {
	identity := c.GetString("identity")
	req := request.FileUpload{}
	c.BindJSON(&req)
	existRepoIdentity, exists, err := logic.IsExistInRepoPool(req.Hash)
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"msg": err.Error(),
		})
		return
	}
	// not exists, return message
	if !exists {
		c.JSON(http.StatusOK, "no exist")
		return
	}
	// if exists
	_, err = logic.UserFileUploadLogic(entity.UserRepository{
		Identity:           util.UUID(),
		UserIdentity:       identity,
		ParentId:           req.ParentId,
		RepositoryIdentity: existRepoIdentity,
		// 这里只能是file, 上传文件夹无需调用该函数
		Type: "file",
		Name: req.Name,
	})
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"msg": "flash upload user file fail",
		})
		return
	}
	c.JSON(http.StatusOK, "ok")
}

// FileUploadHandler 都是不存在的文件   写到本地和异步上传文件到oss
// 简单文件 直接上传不分片  前端检查大小即可 然后直接调用这个接口
// FileUploadHandler handles a simple (non-chunked) upload of content not yet
// in the repository pool: it writes the file to local storage, registers it
// in the global pool and in the user's repository tree, and — for files
// larger than one OSS chunk — publishes a message so the file is transferred
// to OSS asynchronously.
func FileUploadHandler(c *gin.Context) {
	start := time.Now()
	identity := c.GetString("identity")
	reqJson := c.Request.FormValue("req")

	req := request.FileUpload{}
	// fix: the unmarshal error was silently ignored; a malformed "req" field
	// would register the file with an empty hash/name.
	if err := json.Unmarshal([]byte(reqJson), &req); err != nil {
		log.Println(err)
		c.JSON(http.StatusOK, gin.H{
			"msg": "parameter error",
		})
		return
	}
	fhash := req.Hash
	fname := req.Name
	parentId := req.ParentId
	mf, fh, err := c.Request.FormFile("file")
	if err != nil {
		log.Println(err)
		c.JSON(http.StatusOK, gin.H{
			"msg": "receive file fail",
		})
		return
	}
	// fix: the multipart file was never closed.
	defer mf.Close()
	fsize := int(fh.Size)
	localPath := path.Join("/", "tmp", "clouddisk", fhash+"_"+fname)

	uf := entity.UserRepository{
		Identity:     util.UUID(),
		UserIdentity: identity,
		ParentId:     parentId,
		Type:         "file",
		Name:         fname,
	}
	// store to local disk first; any OSS transfer happens asynchronously below
	err = store.ToLocal(localPath, mf)
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"msg": err.Error(),
		})
		return
	}
	fmt.Printf("write to local cost : %vs \n", time.Since(start).Seconds())
	// register the content in the global repository pool
	repoIdentity := util.UUID()
	repo := entity.RepositoryPool{
		Identity: repoIdentity,
		Hash:     fhash,
		Name:     fname,
		Size:     fsize,
		Location: localPath,
		StoreWay: "local",
		Status:   1,
	}
	err = logic.FileUploadLogic(repo)
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"msg": "upload file fail",
		})
		return
	}

	// bind the stored content to the user's repository tree
	uf.RepositoryIdentity = repo.Identity
	_, err = logic.UserFileUploadLogic(uf)
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"msg": "upload user file fail",
		})
		return
	}

	// larger than one chunk: hand off to the OSS transfer queue
	if fsize > define.Conf.OSS.ChunkSize {
		ext := util.FileExt(fname)
		msg := mq.TransferMessage{
			FileHash:     fhash,
			Prefix:       "normal",
			CurrLocation: localPath,
			StoreKey:     fhash + ext,
		}
		fmt.Println("publish message")
		// fix: the publish error was assigned and then dropped (marked todo);
		// at least log it so a stuck transfer can be found and retried.
		if err := logic.PublishStoreMsgLogic(msg); err != nil {
			log.Println("publish store message fail:", err)
		}
	}
	c.JSON(http.StatusOK, "ok")
}

// InitMultipartUploadHandler 初始化分片信息
func InitMultipartUploadHandler(c *gin.Context) {
	// req包括 filename filehash filesize
	req := request.MultipartInit{}
	err := c.BindJSON(&req)
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"msg": "parameter error",
		})
		return
	}

	rConn := db.RDB().Get()
	defer rConn.Close()

	mpInfo := response.MultipartUploadInfo{
		UploadID:   util.UUID(),
		ChunkSize:  define.ChunkSize,
		ChunkCount: int(math.Ceil(float64(req.Size) / define.ChunkSize)),
	}

	// 将初始化信息写到redis中
	_, err = rConn.Do("HMSET", "MPINFO_"+mpInfo.UploadID,
		"count", mpInfo.ChunkCount,
		"hash", req.Hash,
		"name", req.Name,
		"size", req.Size,
		"parentId", req.ParentId,
		"dstPath", path.Join(define.FileBasePath, req.Hash+"_"+req.Name))
	if err != nil {
		log.Println(err)
		c.JSON(http.StatusOK, gin.H{
			"msg": "init mp upload fail",
		})
		return
	}
	c.JSON(http.StatusOK, mpInfo)

}

// MultipartUploadHandler 接收分片
func MultipartUploadHandler(c *gin.Context) {
	chunk, _, err := c.Request.FormFile("file")
	defer chunk.Close()
	if err != nil {
		log.Println(err)
		c.JSON(http.StatusOK, gin.H{
			"msg": "parameter error",
		})
		return
	}
	formReq := c.Request.FormValue("req")
	req := request.PartUpload{}
	err = json.Unmarshal([]byte(formReq), &req)
	if err != nil {
		log.Println(err)
		c.JSON(http.StatusOK, gin.H{
			"msg": "parameter error",
		})
		return
	}

	// redis
	r := db.RDB().Get()
	defer r.Close()
	// todo 查redis 看对应分片在不在 如果在就不用传了 直接return ok
	// 适用于如果之前上传失败导致重传，可以快速检查是否已经存在
	reply, err := r.Do("SISMEMBER", "MPDONE_"+req.UploadID, req.Index)
	exists := reply.(int64)
	if exists == 1 {
		c.JSON(http.StatusOK, "ok")
		return
	}
	os.MkdirAll(path.Join(define.ChunkBasePath, req.UploadID), 0755)
	fpath := path.Join(define.ChunkBasePath, req.UploadID, strconv.Itoa(req.Index))
	f, err := os.Create(fpath)
	buf := make([]byte, 1<<20)
	if _, err := io.CopyBuffer(f, chunk, buf); err != nil {
		log.Println(err)
		c.JSON(http.StatusOK, gin.H{
			"msg": "upload fail",
		})
		return
	}
	// 写完之后，记录到redis

	_, err = r.Do("SADD", "MPDONE_"+req.UploadID, req.Index)
	if err != nil {
		log.Println(err)
		c.JSON(http.StatusOK, gin.H{
			"msg": "mp upload fail",
		})
		return
	}

	c.JSON(http.StatusOK, "ok")
}

// MergeMultiPartHandler 合并全部 并删除原文件 以及redis中相关信息
func MergeMultiPartHandler(c *gin.Context) {
	identity := c.GetString("identity")
	// 校验分片是否全部存在
	// 如果没有, 返回一个切片列表,
	req := request.MergeMultiPart{}
	err := c.BindJSON(&req)
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"msg": "parameter error",
		})
		return
	}

	r := db.RDB().Get()
	defer r.Close()
	// 查询该upladId对应的初始化信息
	ret, err := r.Do("HGETALL", "MPINFO_"+req.UploadID)
	if err != nil {
		log.Println(err)
		c.JSON(http.StatusOK, gin.H{
			"msg": "unknown error",
		})
		return
	}
	m := map[string]string{}
	for i := 0; i < len(ret.([]interface{}))-1; i++ {
		k := string(ret.([]interface{})[i].([]byte))
		v := string(ret.([]interface{})[i+1].([]byte))
		m[k] = v
	}
	count, _ := strconv.Atoi(m["count"])
	parentId, _ := strconv.Atoi(m["parentId"])
	size, _ := strconv.Atoi(m["size"])
	hash := m["hash"]
	name := m["name"]
	dstPath := m["dstPath"]
	// 查询已经上传了的个数
	ret, err = r.Do("SCARD", "MPDONE_"+req.UploadID)
	if err != nil {
		log.Println(err)
		c.JSON(http.StatusOK, gin.H{
			"msg": "unknown error",
		})
		return
	}
	doneCount := ret.(int64)
	if count != int(doneCount) {
		c.JSON(http.StatusOK, gin.H{
			"msg": "not complete upload",
		})
		return
	}

	// 分块合并 必须是同步的  大文件合并慢  前端这个线程是是必须要阻塞的
	dstFile, err := os.Create(dstPath)
	defer dstFile.Close()
	buf := make([]byte, 1<<20)
	for i := 0; i < count; i++ {
		open, err := os.Open(path.Join(define.ChunkBasePath, req.UploadID, strconv.Itoa(i)))
		if err != nil {
			log.Println(err)
			c.JSON(http.StatusOK, gin.H{
				"msg": "upload fail",
			})
			return
		}
		_, err = io.CopyBuffer(dstFile, open, buf)
		if err != nil {
			log.Println(err)
			c.JSON(http.StatusOK, gin.H{
				"msg": "upload fail",
			})
			return
		}
		open.Close()
	}

	// 合并完了之后 往数据库新增
	repoIdentity := util.UUID()
	err = logic.FileUploadLogic(entity.RepositoryPool{
		Identity: repoIdentity,
		Hash:     hash,
		Name:     name,
		Size:     size,
		Location: dstPath,
		StoreWay: "local",
		Status:   1,
	})
	if err != nil {
		log.Println(err)
		c.JSON(http.StatusOK, gin.H{
			"msg": "upload fail",
		})
		return
	}

	_, err = logic.UserFileUploadLogic(entity.UserRepository{
		Identity:           util.UUID(),
		UserIdentity:       identity,
		ParentId:           parentId,
		RepositoryIdentity: repoIdentity,
		Type:               "file",
		Name:               name,
	})
	if err != nil {
		log.Println(err)
		c.JSON(http.StatusOK, gin.H{
			"msg": "upload fail",
		})
		return
	}

	// 最后删除原先的文件（非实时，可以丢到rabbitmq）
	err = logic.PublishRemoveChunksLogic(path.Join(define.ChunkBasePath, req.UploadID))
	if err != nil {
		fmt.Println("publish remove fail")
		// todo fail
	}
	// 将redis中相关uploadId的内容删除

	c.JSON(http.StatusOK, "ok")

}

// DirInfoHandler returns the contents of the directory identified by the
// numeric ":id" path parameter.
func DirInfoHandler(c *gin.Context) {
	dirID, convErr := strconv.Atoi(c.Param("id"))
	if convErr != nil {
		log.Println(convErr)
		c.JSON(http.StatusOK, gin.H{
			"msg": "parameter error",
		})
		return
	}
	data, logicErr := logic.DirInfoLogic(dirID)
	if logicErr != nil {
		c.JSON(http.StatusOK, gin.H{
			"msg": logicErr.Error(),
		})
		return
	}
	c.JSON(http.StatusOK, *data)
}

// FileInfoHandler returns the size information for the file identified by
// the numeric ":id" path parameter.
func FileInfoHandler(c *gin.Context) {
	fileID, convErr := strconv.Atoi(c.Param("id"))
	if convErr != nil {
		log.Println(convErr)
		c.JSON(http.StatusOK, gin.H{
			"msg": "parameter error",
		})
		return
	}
	size, logicErr := logic.FileInfoLogic(fileID)
	if logicErr != nil {
		c.JSON(http.StatusOK, gin.H{
			"msg": logicErr.Error(),
		})
		return
	}
	c.JSON(http.StatusOK, size)
}
