package handler

import (
	"fmt"
	"io"
	"math"
	"net/http"
	"os"
	"path"
	"strconv"
	"strings"
	"time"

	rPool "cache/redis"
	"db"
	"util"

	redis "github.com/garyburd/redigo/redis"
)

// MultipartUploadInfo describes one multipart upload session; it is stored
// (field by field) in redis and returned to the client on initialization.
type MultipartUploadInfo struct {
	FileHash    string // hash of the complete file; identifies the upload target
	UploadId    string // unique id of this upload session
	FileSize    int    // total file size in bytes
	ChunkSize   int    // size of each chunk in bytes (the last chunk may be smaller)
	ChunkCount  int    // total number of chunks: ceil(FileSize / ChunkSize)
	ChunkExists []int  // indices of chunks already uploaded (used for resuming)
}

const (
	// ChunkDir : directory where uploaded chunks are stored
	ChunkDir = "/home/rui/go_project/filestore_server/data/chunk/"
	// MergeDir : directory where merged (complete) files are stored
	MergeDir = "/home/rui/go_project/filestore_server/data/merge/"
	// ChunkKeyPrefix : redis key prefix for per-upload chunk state hashes
	ChunkKeyPrefix = "MP_"
	// HashUpIDKeyPrefix : redis key prefix mapping file hash -> uploadId
	HashUpIDKeyPrefix = "HASH_UPID_"
	// chuInx : hash-field prefix for individual chunk indices, e.g. "chkidx_6"
	chuInx = "chkidx_"
)

// init ensures the chunk and merge directories exist before any handler
// runs; the server cannot operate without them, so failure is fatal.
func init() {
	dirs := []struct {
		path   string
		errMsg string
	}{
		{ChunkDir, "无法指定目录用于存储分块文件: "},
		{MergeDir, "无法指定目录用于存储合并后文件: "},
	}
	for _, d := range dirs {
		if err := os.MkdirAll(d.path, 0744); err != nil {
			fmt.Println(d.errMsg + d.path)
			os.Exit(1)
		}
	}
}

//初始化分块上传
func InitialMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	// 1.解析用户请求
	r.ParseForm()
	username := r.Form.Get("username")
	filehash := r.Form.Get("filehash")
	filesize, err := strconv.Atoi(r.Form.Get("filesize"))

	if err != nil {
		w.Write(util.NewRespMsg(-1, "文件大小解析异常请检查", nil).JSONBytes())
		return
	}
	// 2.获取redis链接
	rCoon := rPool.GetRedisPool().Get()
	defer rCoon.Close()

	// 3.通过文件hash断点续传，获取uploadId
	fileKey := HashUpIDKeyPrefix + filehash
	keyExists, _ := redis.Bool(rCoon.Do("EXISTS", fileKey))
	var uploadId = ""
	if keyExists {
		uploadId, err = redis.String(rCoon.Do("GET", fileKey))
		if err != nil {
			w.Write(util.NewRespMsg(-1, "upload part failed", err.Error()).JSONBytes())
			return
		}
	}
	// 4.1 首次上传则新建uploadID
	// 4.2 断点续传则根据uploadID获取已上传的文件分块列表
	chunksExist := []int{}
	if uploadId == "" {
		uploadId = username + fmt.Sprintf("%x", time.Now().UnixNano())
	} else {
		chunks, err := redis.Values(rCoon.Do("HGETALL", ChunkKeyPrefix+uploadId))
		if err != nil {
			w.Write(util.NewRespMsg(-1, "Upload part failed", err.Error()).JSONBytes())
			return
		}
		for i := 0; i < len(chunks); i += 2 {
			k := string(chunks[i].([]byte))
			v := string(chunks[i+1].([]byte))
			if strings.HasPrefix(k, chuInx) && v == "1" {
				// chkidx_6 -> 6
				chunkIdx, _ := strconv.Atoi(k[7:len(k)])
				chunksExist = append(chunksExist, chunkIdx)
			}
		}
	}
	// 5.生成分块上传信息
	upInfo := MultipartUploadInfo{
		FileHash:    filehash,
		FileSize:    filesize,
		UploadId:    username + fmt.Sprintf("%x", time.Now().UnixNano()),
		ChunkSize:   5 * 1024 * 1024, //5M
		ChunkCount:  int(math.Ceil(float64(filesize) / (5 * 1024 * 1024))),
		ChunkExists: chunksExist,
	}
	// 6.写入分块信息
	hkey := ChunkKeyPrefix + upInfo.UploadId
	if len(upInfo.ChunkExists) <= 0 {
		rCoon.Do("HSET", hkey, "fileHash", upInfo.FileHash)
		rCoon.Do("HSET", hkey, "fileSize", upInfo.FileSize)
		rCoon.Do("HSET", hkey, "chunkSize", upInfo.ChunkSize)
		rCoon.Do("HSET", hkey, "chunkCount", upInfo.ChunkCount)
		rCoon.Do("EXPIRE", hkey, 1000*60)
		rCoon.Do("SET", fileKey, upInfo.UploadId, "EX", 1000*60)
	}
	// 5.返回初始化信息到客户端
	w.Write(util.NewRespMsg(0, "OK", upInfo).JSONBytes())
}

// UploadPartHandler receives a single chunk of a multipart upload, stores it
// under ChunkDir/<uploadid>/<index>, and marks the chunk as done in redis.
func UploadPartHandler(w http.ResponseWriter, r *http.Request) {
	// 1. Parse request parameters.
	r.ParseForm()
	uploadId := r.Form.Get("uploadid")
	chunkIndex := r.Form.Get("index")

	// 2. Get a redis connection.
	rConn := rPool.GetRedisPool().Get()
	defer rConn.Close()

	// 3. Create the chunk file and stream the request body into it.
	fpath := ChunkDir + uploadId + "/" + chunkIndex
	os.MkdirAll(path.Dir(fpath), 0744)
	fd, err := os.Create(fpath)
	if err != nil {
		w.Write(util.NewRespMsg(-1, "upload part faild", nil).JSONBytes())
		return
	}
	defer fd.Close()
	// BUGFIX: the original manual read loop ignored fd.Write errors, so a
	// short write (e.g. disk full) silently produced a corrupt chunk while
	// still reporting success. io.Copy propagates both read and write errors.
	if _, err := io.Copy(fd, r.Body); err != nil {
		w.Write(util.NewRespMsg(-1, "upload part faild", nil).JSONBytes())
		return
	}

	// 4. Mark the chunk as uploaded in the session hash.
	if _, err = rConn.Do("HSET", ChunkKeyPrefix+uploadId, chuInx+chunkIndex, 1); err != nil {
		// BUGFIX: the original returned here without writing any HTTP
		// response, leaving the client hanging on a redis failure.
		w.Write(util.NewRespMsg(-1, "upload part faild", nil).JSONBytes())
		return
	}

	w.Write(util.NewRespMsg(0, "OK", nil).JSONBytes())
}

// CompleteUploadHandler finalizes a multipart upload: it verifies all chunks
// arrived, merges them into a single file, records the file in the DB, and
// removes the redis bookkeeping and on-disk chunks.
func CompleteUploadHandler(w http.ResponseWriter, r *http.Request) {
	// 1. Parse request parameters.
	r.ParseForm()
	uploadId := r.Form.Get("uploadid")
	username := r.Form.Get("username")
	fileHash := r.Form.Get("filehash")
	fileSize := r.Form.Get("filesize")
	fileName := r.Form.Get("filename")

	// 2. Check that every chunk has been uploaded.
	rConn := rPool.GetRedisPool().Get()
	// BUGFIX: the connection was never closed, leaking it from the pool.
	defer rConn.Close()

	data, err := redis.Values(rConn.Do("HGETALL", ChunkKeyPrefix+uploadId))
	if err != nil {
		w.Write(util.NewRespMsg(-1, "Complete Upload faild", nil).JSONBytes())
		return
	}
	totalCount := 0
	chunkCount := 0
	// HGETALL returns alternating field/value pairs.
	for i := 0; i < len(data); i += 2 {
		k := string(data[i].([]byte))
		v := string(data[i+1].([]byte))
		if k == "chunkCount" {
			totalCount, _ = strconv.Atoi(v)
		} else if strings.HasPrefix(k, chuInx) && v == "1" {
			// BUGFIX: the original matched the literal "chunkidx_", but
			// chunks are recorded with the chuInx ("chkidx_") prefix, so
			// chunkCount never incremented and completion always failed.
			chunkCount++
		}
	}
	if totalCount != chunkCount {
		w.Write(util.NewRespMsg(-2, "invalid request", nil).JSONBytes())
		return
	}

	// 3. Merge the chunks. (Note from 2020/04/01: this merge step is not
	// strictly required, since a later move to ceph/oss can upload in
	// chunked form directly.)
	if mergeSuc := util.MergeChunksByShell(ChunkDir+uploadId, MergeDir+fileHash, fileHash); !mergeSuc {
		w.Write(util.NewRespMsg(-3, "Complete upload failed", nil).JSONBytes())
		return
	}

	// 4. Record the file in the unique-file and user-file tables.
	fSize, _ := strconv.Atoi(fileSize)
	db.OnFileUploadFinished(fileHash, fileName, int64(fSize), "")
	db.OnUserFileUploadFinished(username, fileHash, fileName, int64(fSize))

	// 5. Delete the uploaded chunk files and the redis session state.
	_, delHashErr := rConn.Do("DEL", HashUpIDKeyPrefix+fileHash)
	delUploadID, delUploadInfoErr := redis.Int64(rConn.Do("DEL", ChunkKeyPrefix+uploadId))
	if delUploadID != 1 || delUploadInfoErr != nil || delHashErr != nil {
		w.Write(util.NewRespMsg(-4, "Complete upload part failed", nil).JSONBytes())
		return
	}

	delRes := util.RemovePathByShell(ChunkDir + uploadId)
	if !delRes {
		fmt.Printf("Failed to delete chuncks as upload comoleted, uploadID: %s\n", uploadId)
	}

	// 6. Respond success.
	// BUGFIX: the success response previously carried the message
	// "Complete Upload faild" despite code 0; now "OK" like the other handlers.
	w.Write(util.NewRespMsg(0, "OK", nil).JSONBytes())
}

// CancelUploadHandler aborts an in-progress multipart upload: it resolves
// the uploadId from the file hash, removes the redis bookkeeping, and
// deletes any chunk files already written to disk.
func CancelUploadHandler(w http.ResponseWriter, r *http.Request) {
	// 1. Parse request parameters.
	r.ParseForm()
	fileHash := r.Form.Get("filehash")

	// 2. Borrow a redis connection from the pool.
	conn := rPool.GetRedisPool().Get()
	defer conn.Close()

	// 3. Resolve the uploadId for this file hash; nothing to cancel otherwise.
	hashKey := HashUpIDKeyPrefix + fileHash
	upID, err := redis.String(conn.Do("GET", hashKey))
	if err != nil || upID == "" {
		w.Write(util.NewRespMsg(-1, "Cancel upload part failed", nil).JSONBytes())
		return
	}

	// Drop both redis entries for this session.
	_, errHash := conn.Do("DEL", hashKey)
	_, errInfo := conn.Do("DEL", ChunkKeyPrefix+upID)
	if errHash != nil || errInfo != nil {
		w.Write(util.NewRespMsg(-2, "Cancel upload part failed", nil).JSONBytes())
		return
	}

	// 4. Remove the chunk files already uploaded (best effort: log only).
	if ok := util.RemovePathByShell(ChunkDir + upID); !ok {
		fmt.Printf("Failed to delete chunks as upload canceled, uploadID:%s\n", upID)
	}

	// 5. Respond success.
	w.Write(util.NewRespMsg(0, "OK", nil).JSONBytes())
}
