package file

import (
	"encoding/xml"
	"errors"
	"fmt"
	"hft/Database/admin/file"
	fileSql "hft/Database/admin/file"
	"hft/Http/Models/dao"
	adminreq "hft/Http/Models/request/admin_req"
	"hft/Http/Models/response/admin_res"
	"hft/Http/Services/base"
	dirdao "hft/Http/Services/dir/dir_dao"
	filedao "hft/Http/Services/file/file_dao"
	"hft/Http/Services/file/file_model"
	"hft/Http/Services/file/file_utils"
	utils "hft/Http/Utils"
	"hft/e"
	"hft/global"
	"hft/tools"
	"hft/tools/upload"
	"io"
	"mime/multipart"
	"time"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

type File struct{}

// UploadTempFile replaces the cover image of the file identified by fileID:
// it deletes the previous cover object (if any) from OSS, uploads the new
// image read from file, and stores the generated object name on the record.
// Returns e.FileNotExist when the file is missing or not owned by the caller.
func (*File) UploadTempFile(claims tools.BaseClaims, file io.Reader, fileID int) (err error) {

	account, err := base.FindAccount(claims, global.OC_DB)
	if err != nil {
		return err
	}

	// Fix: the bucket error was previously discarded with `_`; a failed
	// bucket lookup would have panicked on the OSS calls below.
	bucket, err := upload.NewBucket()
	if err != nil {
		return errors.New(e.ErrorInfo)
	}

	newFilename := file_utils.GenerateFIleName("temp.jpg", tools.GenUUID())

	// Verify the file exists and belongs to this account before touching OSS.
	var nowFile *fileSql.File
	err = global.OC_DB.
		Model(&fileSql.File{}).
		Where("account_id = ?", account.ID).
		Where("id = ?", fileID).First(&nowFile).Error
	if err != nil {
		return errors.New(e.FileNotExist)
	}

	// Best-effort removal of the old cover object; a stale orphan in OSS is
	// harmless, so the error is deliberately ignored.
	if nowFile.Cover != "" {
		_ = bucket.DeleteObject(nowFile.Cover)
	}

	// Upload the new cover object. (Debug print removed.)
	if err = bucket.PutObject(newFilename, file); err != nil {
		return errors.New(e.ErrorInfo)
	}

	// Persist the new cover name; scoped to the owning account for safety
	// (ownership was already verified above, so the result is unchanged).
	err = global.OC_DB.Model(&fileSql.File{}).
		Where("id = ?", fileID).
		Where("account_id = ?", account.ID).
		Update("cover", newFilename).Error
	if err != nil {
		return errors.New(e.ErrorInfo)
	}

	return nil
}

// List returns a paginated listing of the caller's files under the requested
// directory (including all of its descendant directories). Cover and GetUrl
// are rewritten into full OSS bucket URLs; the original object key is kept
// in BaseUrl.
func (*File) List(claims tools.BaseClaims, paginate *adminreq.FilePage) (page *utils.Page, err error) {

	account, err := base.FindAccount(claims, global.OC_DB)
	if err != nil {
		return nil, err
	}

	// Collect the target directory plus every directory nested below it.
	ids, err := dirdao.FindAllDirBelongThisDir(account.ID, paginate.DirID, global.OC_DB)
	if err != nil {
		return nil, err
	}

	var fileList []*file.File
	newFileList := make([]*file_model.ResFile, 0)

	// Fix: the query error was previously ignored, which could silently
	// return an empty page on a DB failure.
	err = global.OC_DB.Model(&file.File{}).
		Scopes(filedao.FileDao.List(account.ID, paginate, ids, global.OC_DB)).
		Find(&fileList).
		Scopes(dao.GetCount(global.OC_DB)).
		Count(&paginate.Total).Error
	if err != nil {
		return nil, errors.New(e.ErrorInfo)
	}

	for _, v := range fileList {
		if v.Cover != "" {
			v.Cover = global.OC_CONFIG.AliyunOSS.BucketUrl + "/" + v.Cover
		}
		// Keep the raw object key before prefixing with the bucket URL.
		baseUrl := v.GetUrl
		v.GetUrl = global.OC_CONFIG.AliyunOSS.BucketUrl + "/" + v.GetUrl
		newFileList = append(newFileList, &file_model.ResFile{
			File: fileSql.File{
				ID:         v.ID,
				DirID:      v.DirID,
				DirLink:    v.DirLink,
				FileType:   v.FileType,
				FileSize:   v.FileSize,
				CreateTime: v.CreateTime,
				GetUrl:     v.GetUrl,
				Cover:      v.Cover,
				RealName:   v.RealName,
			},
			BaseUrl: baseUrl,
		})
	}

	paginate.Data = newFileList

	return &paginate.Page, nil
}

// Edit renames a file (real_name column) belonging to the calling account.
// The update is scoped by both the file id and the account id so one user
// cannot rename another user's file.
func (*File) Edit(claims tools.BaseClaims, id int, name string) (err error) {
	account, err := base.FindAccount(claims, global.OC_DB)
	if err != nil {
		return err
	}

	updateErr := global.OC_DB.
		Model(&file.File{}).
		Where("id = ?", id).
		Where("account_id = ?", account.ID).
		Update("real_name", name).Error
	if updateErr != nil {
		return errors.New(e.ErrorInfo)
	}

	return nil
}

// Delete removes the given files: first the backing objects in OSS, then the
// database rows (hard delete via Unscoped). Only files owned by the calling
// account are affected.
func (*File) Delete(claims tools.BaseClaims, ids []int) (err error) {
	account, err := base.FindAccount(claims, global.OC_DB)
	if err != nil {
		return err
	}

	// Fix: the lookup error was previously ignored.
	var files []*file.File
	err = global.OC_DB.Model(&file.File{}).Where("account_id = ?", account.ID).Find(&files, ids).Error
	if err != nil {
		return errors.New(e.ErrorInfo)
	}

	objectNames := make([]string, 0, len(files))
	for _, v := range files {
		objectNames = append(objectNames, v.GetUrl)
	}

	// Fix: the local variable was named `oss`, shadowing the imported oss
	// package — renamed. Also skip the remote call (and the leftover debug
	// print) when nothing matched.
	if len(objectNames) > 0 {
		ossClient := upload.NewOss()
		if err = ossClient.DeleteFile(objectNames); err != nil {
			return errors.New(e.ErrorInfo)
		}
	}

	// Hard-delete the rows owned by this account.
	err = global.OC_DB.Model(&file.File{}).
		Where("account_id = ?", account.ID).
		Unscoped().
		Delete(&fileSql.File{}, ids).Error
	if err != nil {
		return errors.New(e.ErrorInfo)
	}

	return nil
}

// ReturnUUID prepares one upload descriptor per requested file: it checks the
// batch against the user's storage quota, decides whether each file must be
// uploaded in slices (size greater than the configured slice size), and marks
// each file's transfer status as "0" (in progress) in Redis.
func (*File) ReturnUUID(claims tools.BaseClaims, data []*adminreq.FileUUID) (list []*admin_res.FileUUID, err error) {

	// Parse the configured per-file maximum and slice sizes from config
	// (human-readable size strings; exact format is defined by tools.ParseSize).
	maxSize, err := tools.ParseSize(global.OC_CONFIG.System.MaxFile)
	if err != nil {
		return nil, errors.New(e.ErrorInfo)
	}
	slice, err := tools.ParseSize(global.OC_CONFIG.System.SliceFile)
	if err != nil {
		return nil, errors.New(e.ErrorInfo)
	}

	// Total byte size of every file in this request.
	var fileSize int64 = 0
	for _, v := range data {
		fileSize += int64(v.Size)
	}

	// Reject the whole batch if it would exceed the user's storage quota.
	err = filedao.FileDao.IsOverUserLimit(fileSize, claims, global.OC_DB)

	if err != nil {
		return nil, err
	}

	var reqUUIDList = make([]*admin_res.FileUUID, 0)
	// NOTE(review): this negation looks inverted for a helper named
	// "IsOverMaxFileLimit" — as written it errors when the helper returns
	// false. Presumably the helper actually returns "is within limit";
	// confirm against file_utils before changing.
	if !file_utils.IsOverMaxFileLimit(data, int(maxSize)) {
		return nil, errors.New(e.OverFileSize)
	}

	for _, v := range data {
		// Files larger than one slice need chunked upload; Chunk carries the
		// slice size the client should use (0 means single-shot upload).
		if v.Size > int(slice) {
			reqUUIDList = append(reqUUIDList, &admin_res.FileUUID{MD5Secret: v.MD5Secret, Chunk: int(slice), NeedSlice: true, Filename: v.Filename})
		} else {
			reqUUIDList = append(reqUUIDList, &admin_res.FileUUID{MD5Secret: v.MD5Secret, Chunk: 0, NeedSlice: false, Filename: v.Filename})
		}
		// Mark transfer status as "0" with no expiry.
		// NOTE(review): the Set error is silently discarded — verify this is
		// intentional best-effort behavior.
		global.OC_REDIS.Set(v.MD5Secret+"-status", "0", -1)
	}

	return reqUUIDList, nil
}

// UploadSliceFile uploads one chunk (index, 0-based from the caller) of a
// sliced file to OSS via multipart upload. On the first chunk it initiates
// the multipart upload and caches the OSS upload ID in Redis under
// "<uuid>-upload_id"; each uploaded part's ETag is appended to the Redis
// list keyed by uuid. Once all chunkTotal parts are present, the parts are
// merged, the Redis bookkeeping keys are deleted, the file record is saved
// to MySQL, and the returned progress has Status == 1.
func (*File) UploadSliceFile(claims tools.BaseClaims, file multipart.File, chunkTotal int, filename string, uuid string, pid int, index int, fileType string, chunkSize int64, totalSize int64) (data *admin_res.FileProgress, err error) {
	// NOTE(review): the bucket error is discarded here (unlike UploadSingleFile).
	var bucket, _ = upload.NewBucket()
	var newFilename = file_utils.GenerateFIleName(filename, uuid)
	list, err := global.OC_REDIS.LRange(uuid, 0, -1).Result() // empty list => this is the first chunk of this upload
	if err != nil {
		return nil, err
	}
	if len(list) == 0 {
		// First chunk: initiate the OSS multipart upload and remember its
		// upload ID for the subsequent chunks (12h TTL).
		imur, err := bucket.InitiateMultipartUpload(newFilename, oss.ObjectStorageClass(oss.StorageStandard))
		if err != nil {
			fmt.Println(err, "1") // NOTE(review): leftover debug print
			return nil, err
		}
		fmt.Println(tools.JsonParse(imur)) // NOTE(review): leftover debug print
		_, err = global.OC_REDIS.Set(uuid+"-upload_id", imur.UploadID, 12*time.Hour).Result()
		if err != nil {
			fmt.Println(err, "3") // NOTE(review): leftover debug print
			return nil, err
		}
	}

	// Fetch the cached upload ID (set above on the first chunk).
	val, err := global.OC_REDIS.Get(uuid + "-upload_id").Result()

	if err != nil {
		fmt.Println(err, "4") // NOTE(review): leftover debug print
		return nil, err
	}

	// Rebuild the multipart-upload handle from the cached state instead of
	// keeping the original InitiateMultipartUploadResult across requests.
	newImur := &oss.InitiateMultipartUploadResult{XMLName: xml.Name{Space: "", Local: ""}, Key: newFilename, UploadID: val, Bucket: bucket.BucketName}
	// Upload this part; OSS part numbers are 1-based, hence index+1.
	slicePart, err := bucket.UploadPart(*newImur, file, chunkSize, index+1)

	if err != nil {
		fmt.Println(err, "2") // NOTE(review): leftover debug print
		return nil, errors.New("文件传输错误")
	}

	// Record the uploaded part's ETag for the final merge.
	global.OC_REDIS.RPush(uuid, slicePart.ETag)

	// NOTE(review): `list` is the PRE-push length, so this sets the TTL when
	// the second chunk arrives, not the first — confirm this is intended
	// (a single-chunk sliced upload would never get an expiry here).
	if len(list) == 1 {
		global.OC_REDIS.Expire(uuid, 12*time.Hour)
	}

	newList, _ := global.OC_REDIS.LRange(uuid, 0, -1).Result()
	// Progress reported back to the client (Status 0 = still uploading).
	var progress = &admin_res.FileProgress{UUID: uuid, ChunkTotal: chunkTotal, Index: index + 1, Status: 0}
	if len(newList) == chunkTotal {
		// All parts present: assemble the part list in upload order.
		// NOTE(review): PartNumber is taken from the list position, which
		// assumes chunks were pushed in order — concurrent out-of-order
		// uploads would mismatch part numbers; confirm callers serialize.
		var parts []oss.UploadPart
		for k, v := range newList {
			var part = &oss.UploadPart{XMLName: xml.Name{Space: "", Local: ""}, PartNumber: k + 1, ETag: v}
			parts = append(parts, *part)
		}
		objactAcl := oss.ObjectACL(oss.ACLPrivate)                           // set the object ACL to private
		_, err := bucket.CompleteMultipartUpload(*newImur, parts, objactAcl) // merge all parts into the final object
		if err != nil {
			fmt.Println(err)
			return nil, errors.New("文件合并失败，请重新上传")
		}
		// Drop all Redis bookkeeping for this upload.
		global.OC_REDIS.Del(uuid, uuid+"-upload_id", uuid+"-status")

		// Persist the completed file's metadata.
		err = file_utils.SaveFileToMysql(filename, totalSize, newFilename, fileType, pid, claims)
		if err != nil {
			return nil, err
		}

		progress.Status = 1
		return progress, nil
	}
	return progress, nil
}

// UploadSingleFile uploads a file to OSS in a single request (no slicing),
// clears the transfer-status marker set by ReturnUUID, and persists the file
// record to MySQL. The returned progress always reports one completed chunk.
func (*File) UploadSingleFile(claims tools.BaseClaims, file multipart.File, filename string, uuid string, pid int, fileType string, totalSize int64) (data *admin_res.FileProgress, err error) {
	bucket, err := upload.NewBucket()
	if err != nil {
		// Fix: leftover debug fmt.Println removed.
		return nil, errors.New(e.ErrorInfo)
	}

	newFilename := file_utils.GenerateFIleName(filename, uuid)
	if err = bucket.PutObject(newFilename, file); err != nil {
		return nil, errors.New(e.ErrorInfo)
	}

	progress := &admin_res.FileProgress{UUID: uuid, ChunkTotal: 1, Index: 1, Status: 1}

	// Clear the "in progress" marker set when the upload UUID was issued.
	global.OC_REDIS.Del(uuid + "-status")

	if err = file_utils.SaveFileToMysql(filename, totalSize, newFilename, fileType, pid, claims); err != nil {
		return nil, err
	}

	return progress, nil
}

// FileService is the shared, stateless instance of the file service.
var FileService = new(File)
