package aliyun

import (
	"dmsv/etc"
	"fmt"
	"github.com/aliyun/aliyun-oss-go-sdk/oss"
	"github.com/beego/beego/v2/core/logs"
	"time"
)

// taskInfo describes one pending upload: which call's recording file to
// push to OSS, under what object key, and how many attempts were made.
type taskInfo struct {
	appId    string // application the call belongs to (first path segment of the key)
	callId   string // call identifier; used for logging and the object file name
	filePath string // local path of the recording file to upload
	key      string // OSS object key, "<appId>/<yyyymmdd>/<callId>.wav"
	retry    int    // upload attempts so far; task is dropped once it reaches 5
}

var (
	gUploadTaskChan chan *taskInfo // buffered queue drained by the worker goroutines started in ossInit
	gBucketName     string         // OSS bucket from config; empty means "store recordings locally"
	gEndPoint       string         // OSS endpoint from config
)

const (
	// max_task_len caps the upload queue; producers drop tasks beyond it.
	max_task_len = 10000
)

// ossInit reads the bucket/endpoint from configuration and, when a bucket
// is configured, creates the upload queue and starts
// etc.Conf.Storage.UploadTask worker goroutines that drain it and push
// recording files to OSS. With no bucket configured, recordings stay on
// local disk and no workers are started.
//
// accessId/accessKey are the OSS credentials captured by the workers.
func ossInit(accessId, accessKey string) {

	gBucketName = etc.Conf.Storage.Bucket
	gEndPoint = etc.Conf.Storage.EndPoint

	// No bucket configured: recordings are kept locally, nothing to start.
	if len(gBucketName) == 0 {
		logs.Debug("录音文件存储本地....")
		return
	}

	gUploadTaskChan = make(chan *taskInfo, max_task_len)

	logs.Debug("文件上传线程已启动...")
	for i := 0; i < etc.Conf.Storage.UploadTask; i++ {
		go func(taskId int) {
			for {
				newTask := <-gUploadTaskChan

				client := createUplaodClient(gEndPoint,
					accessId, accessKey,
					gBucketName)
				if client == nil {
					// Client creation failed (endpoint/credential problem).
					// BUG FIX: time.Sleep(5) slept 5 *nanoseconds* — the
					// Duration must be scaled by time.Second, otherwise this
					// loop spins hot on persistent failures.
					time.Sleep(5 * time.Second)
					// Non-blocking re-queue: a blocking send from a consumer
					// goroutine into a full queue could stall the worker.
					select {
					case gUploadTaskChan <- newTask:
					default:
						logs.Error("[%v][%v] upload queue is full,drop the file:%v", taskId, newTask.callId, newTask.filePath)
					}
					continue
				}

				err := client.PutObjectFromFile(newTask.key, newTask.filePath)
				if err != nil {
					logs.Error("[%v][%v]upload file :%v err:%v,retry :%v ", taskId, newTask.callId, newTask.filePath, err, newTask.retry)

					newTask.retry++
					if newTask.retry < 5 {
						// Non-blocking re-queue (see above).
						select {
						case gUploadTaskChan <- newTask:
						default:
							logs.Error("[%v][%v] upload queue is full,drop the file:%v", taskId, newTask.callId, newTask.filePath)
						}
					} else {
						// Was silently dropped before; make the give-up visible.
						logs.Error("[%v][%v] give up uploading %v after %v attempts", taskId, newTask.callId, newTask.filePath, newTask.retry)
					}
				} else {
					logs.Debug("[%v][%v] %v upload success,retry:%v.", taskId, newTask.callId, newTask.filePath, newTask.retry)
				}
			}
		}(i)
	}
}

// createUplaodClient builds an OSS client for the given endpoint and
// credentials, then resolves the named bucket. It returns nil when either
// step fails; the failure is logged.
func createUplaodClient(endpoint, accessId, accessKey, bucketName string) *oss.Bucket {
	// Build the OSS client instance first.
	cli, err := oss.New(endpoint, accessId, accessKey)
	if err != nil {
		logs.Error("创建客户端 Error:", err)
		return nil
	}

	// Then resolve the target bucket handle.
	bkt, err := cli.Bucket(bucketName)
	if err != nil {
		logs.Error("获取存储桶 Error:", err)
		return nil
	}
	return bkt
}

// UpLoadFile queues a local recording file for asynchronous upload to OSS
// and returns the HTTPS URL where it will be reachable.
//
// Behavior:
//   - empty filePath returns "" (nothing to upload);
//   - no bucket configured returns filePath unchanged (file stays local);
//   - otherwise the task is enqueued (or dropped with an error log if the
//     queue is full) and the predicted URL is returned either way.
func UpLoadFile(appId, callId, filePath string) string {

	if filePath == "" {
		return ""
	}

	// OSS not configured — the recording stays on local disk.
	if len(gBucketName) == 0 {
		return filePath
	}

	// Object key: <appId>/<yyyymmdd>/<callId>.wav
	key := fmt.Sprintf("%s/%s/%s.wav", appId, time.Now().Format("20060102"), callId)
	fileUrl := fmt.Sprintf("https://%s.%s/%s", gBucketName, gEndPoint, key)

	newTask := &taskInfo{
		appId:    appId,
		callId:   callId,
		filePath: filePath,
		key:      key,
	}

	// BUG FIX: the original checked len(gUploadTaskChan) < max_task_len and
	// then did a blocking send — another producer can fill the channel in
	// between, blocking this caller. A select with default is a race-free
	// non-blocking send.
	select {
	case gUploadTaskChan <- newTask:
	default:
		logs.Error("[%v] upload queue is full,drop the file:%v", callId, filePath)
	}

	return fileUrl
}
