package xunfeiyun

import (
	"bytes"
	"context"
	"crypto/hmac"
	"crypto/sha256"
	"device-admin/config"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"strings"
	"time"

	"192.168.1.75/go-pkg/logx"
	"github.com/gorilla/websocket"
)

// XunFeiYun groups the Xunfei (iFlytek) speech-API client methods; it holds
// no state, credentials are read from the global config on each call.
type XunFeiYun struct{}

/**
 * Streaming speech-dictation WebAPI sample. API docs (must read): https://doc.xfyun.cn/rest_api/语音听写（流式版）.html
 * webapi dictation reference thread (must read): http://bbs.xfyun.cn/forum.php?mod=viewthread&tid=38947&extra=
 * Error-code list: https://www.xfyun.cn/document/error-code (check whenever a non-zero code is returned)
 */
var (
	RecognitionURL = "wss://iat-api.xfyun.cn/v2/iat" // speech-to-text (IAT) websocket endpoint
	HostUrl        = "wss://tts-api.xfyun.cn/v2/tts" // text-to-speech (TTS) websocket endpoint
)

// Frame markers required by the streaming protocol's "data.status" field.
const (
	STATUS_FIRST_FRAME    = 0 // first audio frame; carries the common/business parameters
	STATUS_CONTINUE_FRAME = 1 // intermediate audio frame
	STATUS_LAST_FRAME     = 2 // final frame; tells the server the audio stream is complete
)

// SpeechRecognition streams the WAV file at path File (44-byte RIFF header is
// stripped; payload assumed to be 16 kHz 16-bit PCM per the "format" field
// sent below) to the Xunfei IAT websocket API and returns the recognized text.
// Returns "" when the service reports a non-zero error code. Dial/file
// failures panic, matching the rest of this file's error style.
func (xfy XunFeiYun) SpeechRecognition(File string) (content string) {
	d := websocket.Dialer{
		HandshakeTimeout: 5 * time.Second,
	}
	// Handshake and establish the websocket connection.
	conn, resp, err := d.Dial(assembleAuthUrl(RecognitionURL, config.Config.XunFeiYun.ApiKey, config.Config.XunFeiYun.ApiSecret), nil)
	if err != nil {
		// resp may be nil here; readResp handles that.
		panic(readResp(resp) + err.Error())
	}
	if resp.StatusCode != 101 {
		// BUG FIX: err is nil on this path, so the old `err.Error()` call
		// itself panicked with a nil-pointer dereference.
		panic(readResp(resp))
	}
	defer conn.Close()

	st := time.Now()
	const frameSize = 1280                 // audio bytes per frame
	const interval = 40 * time.Millisecond // pacing between frames (simulated capture interval)

	// ctx lets the reader side stop the sender goroutine on early return.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	allBytes, err := os.ReadFile(File)
	if err != nil {
		panic(err)
	}
	// Log only the size — the payload is binary audio, useless (and huge) as text.
	logx.Info(context.Background(), "bytes", logx.Any("bytes", len(allBytes)))
	if len(allBytes) <= 44 {
		// Guard the slice below: files shorter than the WAV header would panic.
		panic("audio file too short: no data after the 44-byte WAV header")
	}
	allBytes = allBytes[44:] // skip the 44-byte WAV header

	// Sender goroutine: pushes audio frames at ~real-time pace.
	go func() {
		status := STATUS_FIRST_FRAME // first / continue / last frame marker
		buffer := make([]byte, frameSize)
		reader := bytes.NewReader(allBytes)
		for {
			n, err := reader.Read(buffer) // renamed from `len`, which shadowed the builtin
			if err != nil {
				if err == io.EOF { // audio exhausted: flag the final frame
					status = STATUS_LAST_FRAME
				} else {
					panic(err)
				}
			}
			select {
			case <-ctx.Done():
				fmt.Println("session end ---")
				return
			default:
			}
			audio := base64.StdEncoding.EncodeToString(buffer[:n])
			var frameData map[string]interface{}
			switch status {
			case STATUS_FIRST_FRAME:
				// First frame carries appid and business params (only once).
				frameData = map[string]interface{}{
					"common": map[string]interface{}{
						"app_id": config.Config.XunFeiYun.Appid,
					},
					"business": map[string]interface{}{
						"language": "zh_cn",
						"domain":   "iat",
						"accent":   "mandarin",
					},
					"data": map[string]interface{}{
						"status":   STATUS_FIRST_FRAME,
						"format":   "audio/L16;rate=16000",
						"audio":    audio,
						"encoding": "raw",
					},
				}
				fmt.Println("send first", frameData)
			case STATUS_CONTINUE_FRAME, STATUS_LAST_FRAME:
				// Middle/last frames carry only the data section.
				frameData = map[string]interface{}{
					"data": map[string]interface{}{
						"status":   status,
						"format":   "audio/L16;rate=16000",
						"audio":    audio,
						"encoding": "raw",
					},
				}
			}
			if err := conn.WriteJSON(frameData); err != nil {
				// Was silently ignored; a dead connection now stops the sender.
				fmt.Println("write frame error:", err)
				return
			}
			if status == STATUS_LAST_FRAME {
				fmt.Println("send last", frameData)
				return
			}
			if status == STATUS_FIRST_FRAME {
				status = STATUS_CONTINUE_FRAME
			}
			time.Sleep(interval) // simulate the audio sampling interval
		}
	}()

	// Reader loop: accumulate partial transcripts until the server signals end.
	var transcript strings.Builder
	for {
		var respData = RespData{}
		_, msg, err := conn.ReadMessage()
		if err != nil {
			fmt.Println("read message error:", err)
			break
		}
		if err := json.Unmarshal(msg, &respData); err != nil {
			// Was silently ignored; a malformed message now ends the session.
			fmt.Println("unmarshal error:", err)
			break
		}
		fmt.Println("result:", respData.Data.Result.String(), respData.Sid)
		if respData.Code != 0 {
			fmt.Println(respData.Code, respData.Message, time.Since(st))
			return // non-zero service code: return "" (named result's zero value)
		}
		var decoder Decoder
		decoder.Decode(&respData.Data.Result)
		transcript.WriteString(decoder.String())
		if respData.Data.Status == 2 { // 2 marks the final message of the session
			fmt.Println(respData.Code, respData.Message, time.Since(st))
			break
		}
	}
	// NOTE(review): kept from the original — presumably lets the sender drain
	// before the deferred Close; confirm before removing.
	time.Sleep(1 * time.Second)
	return transcript.String()
}

// SpeechSynthesis sends srcText to the Xunfei TTS websocket API using voice
// soundSource and returns the synthesized audio as a 16 kHz mono WAV byte
// slice. Returns nil when the service reports a non-zero error code. Dial
// failures panic, matching the rest of this file's error style.
func (xfy XunFeiYun) SpeechSynthesis(srcText, soundSource string) (content []byte) {
	d := websocket.Dialer{
		HandshakeTimeout: 5 * time.Second,
	}
	// Handshake and establish the websocket connection.
	conn, resp, err := d.Dial(assembleAuthUrl(HostUrl, config.Config.XunFeiYun.ApiKey, config.Config.XunFeiYun.ApiSecret), nil)
	if err != nil {
		// resp may be nil here; readResp handles that.
		panic(readResp(resp) + err.Error())
	}
	if resp.StatusCode != 101 {
		// BUG FIX: err is nil on this path, so the old `err.Error()` call
		// itself panicked with a nil-pointer dereference.
		panic(readResp(resp))
	}
	defer conn.Close()

	st := time.Now()
	// TTS takes the whole text in a single frame marked as the last one.
	frameData := map[string]interface{}{
		"common": map[string]interface{}{
			"app_id": config.Config.XunFeiYun.Appid, // appid is required, first frame only
		},
		"business": map[string]interface{}{ // business params, sent once
			"vcn":   soundSource, // voice name
			"aue":   "raw",       // raw PCM output
			"speed": 50,
			"tte":   "UTF8",
		},
		"data": map[string]interface{}{
			"status":   STATUS_LAST_FRAME,
			"encoding": "UTF8",
			"text":     base64.StdEncoding.EncodeToString([]byte(srcText)),
		},
	}
	if err := conn.WriteJSON(frameData); err != nil {
		// Was silently ignored; the read loop below would just hang/fail.
		panic(err)
	}

	// Collect the PCM chunks until the server signals completion.
	var audioBuffer = bytes.NewBuffer(nil)
	for {
		var respData = RespDataSynthesis{}
		_, msg, err := conn.ReadMessage()
		if err != nil {
			fmt.Println("read message error:", err)
			break
		}
		if err := json.Unmarshal(msg, &respData); err != nil {
			// Was silently ignored; a malformed message now ends the session.
			fmt.Println("unmarshal error:", err)
			break
		}
		if respData.Code != 0 {
			fmt.Println(respData.Code, respData.Message, time.Since(st))
			return // non-zero service code: return nil
		}
		audiobytes, err := base64.StdEncoding.DecodeString(respData.Data.Audio)
		if err != nil {
			panic(err)
		}
		audioBuffer.Write(audiobytes)
		if respData.Data.Status == 2 { // 1 = synthesizing, 2 = done
			fmt.Println(respData.Code, respData.Message, time.Since(st))
			break
		}
	}

	// Wrap the raw PCM in a WAV header (mono, 16 kHz).
	wavAudioString := PcmToWav(audioBuffer.String(), 1, 16000)
	return []byte(wavAudioString)
}

// RespData is the envelope of one recognition (IAT) websocket message.
type RespData struct {
	Sid     string `json:"sid"`     // session id, useful when filing support issues
	Code    int    `json:"code"`    // 0 = success; otherwise see the Xfyun error-code list
	Message string `json:"message"` // human-readable status for Code
	Data    struct {
		Result Result `json:"result"` // partial recognition result for this message
		Status int    `json:"status"` // 2 marks the final message of the session
	} `json:"data"`
}

// RespDataSynthesis is the envelope of one synthesis (TTS) websocket message.
type RespDataSynthesis struct {
	Sid     string `json:"sid"`     // session id, useful when filing support issues
	Code    int    `json:"code"`    // 0 = success; otherwise see the Xfyun error-code list
	Message string `json:"message"` // human-readable status for Code
	Data    struct {
		Audio  string `json:"audio,omitempty"`  // base64-encoded PCM chunk
		Ced    int    `json:"ced,omitempty"`    // progress marker (per Xfyun docs) — not used here
		Status int    `json:"status,omitempty"` // 1 = synthesizing, 2 = done
	} `json:"data"`
}

// 创建鉴权url  apikey 即 hmac username
func assembleAuthUrl(hosturl string, apiKey, apiSecret string) string {
	ul, err := url.Parse(hosturl)
	if err != nil {
		fmt.Println(err)
	}
	//签名时间
	date := time.Now().UTC().Format(time.RFC1123)
	//date = "Tue, 28 May 2019 09:10:42 MST"
	//参与签名的字段 host ,date, request-line
	signString := []string{"host: " + ul.Host, "date: " + date, "GET " + ul.Path + " HTTP/1.1"}
	//拼接签名字符串
	sgin := strings.Join(signString, "\n")
	//签名结果
	sha := HmacWithShaTobase64("hmac-sha256", sgin, apiSecret)
	//构建请求参数 此时不需要urlencoding
	authUrl := fmt.Sprintf("hmac username=\"%s\", algorithm=\"%s\", headers=\"%s\", signature=\"%s\"", apiKey,
		"hmac-sha256", "host date request-line", sha)
	//将请求参数使用base64编码
	authorization := base64.StdEncoding.EncodeToString([]byte(authUrl))

	v := url.Values{}
	v.Add("host", ul.Host)
	v.Add("date", date)
	v.Add("authorization", authorization)
	//将编码后的字符串url encode后添加到url后面
	callurl := hosturl + "?" + v.Encode()
	return callurl
}

func HmacWithShaTobase64(algorithm, data, key string) string {
	mac := hmac.New(sha256.New, []byte(key))
	mac.Write([]byte(data))
	encodeData := mac.Sum(nil)
	return base64.StdEncoding.EncodeToString(encodeData)
}

func readResp(resp *http.Response) string {
	if resp == nil {
		return ""
	}
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	return fmt.Sprintf("code=%d,body=%s", resp.StatusCode, string(b))
}

// Decoder parses the returned data. Demo-grade reference only — real-world
// payloads may differ from this layout.
type Decoder struct {
	results []*Result // sparse list of partial results, indexed by sequence number (Sn)
}

// Decode merges one partial recognition Result into the accumulated list,
// keyed by its sequence number Sn. A "rpl" (replace) directive drops the
// previously stored results in the Rg range before storing the new one.
func (d *Decoder) Decode(result *Result) {
	if result == nil || result.Sn < 0 {
		// Guard malformed payloads: a negative Sn would panic on the index below.
		return
	}
	if len(d.results) <= result.Sn {
		// Grow the sparse slice so results[Sn] is addressable.
		d.results = append(d.results, make([]*Result, result.Sn-len(d.results)+1)...)
	}
	if result.Pgs == "rpl" && len(result.Rg) >= 2 {
		// BUG FIX: Rg comes straight from the server and was used unchecked —
		// an out-of-range index (or Rg with fewer than 2 entries) panicked.
		// Clamp the replacement range to the slots we actually hold.
		from, to := result.Rg[0], result.Rg[1]
		if from < 0 {
			from = 0
		}
		if to >= len(d.results) {
			to = len(d.results) - 1
		}
		for i := from; i <= to; i++ {
			d.results[i] = nil
		}
	}
	d.results[result.Sn] = result
}

// String joins all retained partial results, in sequence order, into the
// final transcript. Nil slots (replaced or never filled) are skipped.
// Uses strings.Builder instead of the original += loop (linear, not quadratic).
func (d *Decoder) String() string {
	var b strings.Builder
	for _, v := range d.results {
		if v == nil {
			continue
		}
		b.WriteString(v.String())
	}
	return b.String()
}

// Result is one partial recognition result from the IAT service.
type Result struct {
	Ls  bool   `json:"ls"`  // presumably "last segment" flag — not consulted in this file
	Rg  []int  `json:"rg"`  // replacement range [from, to] used when Pgs == "rpl"
	Sn  int    `json:"sn"`  // sequence number of this result
	Pgs string `json:"pgs"` // progressive-result directive; "rpl" means replace Rg
	Ws  []Ws   `json:"ws"`  // recognized word segments
}

// String concatenates the text of all word segments in this result.
// Uses strings.Builder instead of the original += loop (linear, not quadratic).
func (t *Result) String() string {
	var b strings.Builder
	for _, v := range t.Ws {
		b.WriteString(v.String())
	}
	return b.String()
}

// Ws is one word segment of a recognition result.
type Ws struct {
	Bg int  `json:"bg"` // presumably the segment's begin offset — not consulted in this file
	Cw []Cw `json:"cw"` // candidate words for this segment
}

// String concatenates the candidate words' text for this segment.
// Uses strings.Builder instead of the original += loop (linear, not quadratic).
func (w *Ws) String() string {
	var b strings.Builder
	for _, v := range w.Cw {
		b.WriteString(v.W)
	}
	return b.String()
}

// Cw is one candidate word within a word segment.
type Cw struct {
	Sc int    `json:"sc"` // presumably a confidence score — not consulted in this file
	W  string `json:"w"`  // the word text
}

// PcmToWav prepends a 44-byte RIFF/WAVE header to raw 16-bit little-endian
// PCM data and returns the resulting WAV file contents as a string.
//
//	dst:        raw PCM bytes (as a binary string)
//	numchannel: 1 = mono, 2 = stereo
//	saplerate:  sample rate, e.g. 8000 / 16000
func PcmToWav(dst string, numchannel int, saplerate int) (resDst string) {
	const bitsPerSample = 16 // this writer always emits 16-bit samples
	totalAudioLen := len(dst)
	totalDataLen := totalAudioLen + 36 // RIFF chunk size = file size minus the 8-byte RIFF header
	byteRate := bitsPerSample * saplerate * numchannel / 8

	header := make([]byte, 44) // unset bytes stay 0, which the format requires
	// putLE32 writes v at header[off..off+3] in little-endian order.
	putLE32 := func(off, v int) {
		header[off] = byte(v & 0xff)
		header[off+1] = byte((v >> 8) & 0xff)
		header[off+2] = byte((v >> 16) & 0xff)
		header[off+3] = byte((v >> 24) & 0xff)
	}

	copy(header[0:], "RIFF")
	putLE32(4, totalDataLen)
	copy(header[8:], "WAVE")
	copy(header[12:], "fmt ")
	putLE32(16, 16)               // 'fmt ' chunk size
	header[20] = 1                // audio format 1 = PCM
	header[22] = byte(numchannel) // channel count
	putLE32(24, saplerate)        // sample rate
	putLE32(28, byteRate)         // bytes per second
	// BUG FIX: block align was hard-coded 2*16/8 = 4 regardless of channel
	// count; for mono 16-bit audio (the only caller's case) it must be 2.
	header[32] = byte(numchannel * bitsPerSample / 8)
	header[34] = bitsPerSample // bits per sample
	copy(header[36:], "data")
	putLE32(40, totalAudioLen) // data chunk size

	return string(header) + dst
}
