using Sirenix.OdinInspector;
using System;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityWebSocket;
using UnityEngine.AddressableAssets;
[RequireComponent(typeof(AudioSource))]
public class VAudio : MonoBehaviour
{
    // Planned design: a stack of playback nodes under this object; currently all are plain AudioSources.
    // NOTE(review): only the top of the stack is ever used (Peek). PlayOneShot allows overlapping
    // clips on a single source, so this works, but the 5-node pool is effectively unused — confirm intent.
    public Stack<AudioSource> nodes;
    public static VAudio _instance;

    [LabelText("启用语言识别")] public bool AudioRecognitionEnable = true;
    private static VAduio_RecognitionModule recognitionModule;

    /// <summary>
    /// Initializes the singleton, builds the AudioSource node pool and, when
    /// <see cref="AudioRecognitionEnable"/> is set, starts the speech-recognition
    /// module (WebSocket connection + microphone discovery).
    /// </summary>
    public void Init()
    {
        _instance = this;
        nodes = new Stack<AudioSource>();
        for (int i = 0; i < 5; i++)
        {
            var source = transform.UniqueChild<AudioSource>($"VAudioNode{i}");
            nodes.Push(source);
        }

        if (AudioRecognitionEnable)
        {
            recognitionModule = new VAduio_RecognitionModule();
            recognitionModule.Init();
            // Drop stale subscribers left over from a previous scene / instance.
            onSpeechRecognition = null;
        }
    }

    /// <summary>Releases the WebSocket held by the recognition module when the singleton dies.</summary>
    private void OnDestroy()
    {
        if (recognitionModule != null)
        {
            recognitionModule.Close();
            recognitionModule = null;
        }
        if (_instance == this) _instance = null;
    }

    /// <summary>
    /// Plays <paramref name="audioClip"/> once on the pooled AudioSource.
    /// Safe to call with a null clip or before <see cref="Init"/> has run.
    /// </summary>
    public static void PlayAudio(AudioClip audioClip)
    {
        // Guard: callers may pass a clip that failed to load, and Init() may not have run yet.
        if (audioClip == null || _instance == null || _instance.nodes == null || _instance.nodes.Count == 0)
        {
            Debug.LogWarning("VAudio.PlayAudio skipped: null clip or uninitialized VAudio instance.");
            return;
        }
        _instance.nodes.Peek().PlayOneShot(audioClip);
    }

    /// <summary>Loads an AudioClip by Addressables address and plays it when ready.</summary>
    public static void LoadPlayAudioAsync(string path)
    {
        Addressables.LoadAssetAsync<AudioClip>(path).Completed += (e) =>
        {
            if (e.Status == UnityEngine.ResourceManagement.AsyncOperations.AsyncOperationStatus.Succeeded)
            {
                PlayAudio(e.Result);
            }
            else
            {
                // Previously this failure was silent; surface it so missing addresses are diagnosable.
                Debug.LogError($"VAudio: failed to load AudioClip at address '{path}'.");
            }
        };
    }

    public static void StartRecordOffline() => recognitionModule?.StartRecordOffline();
    public static void StopRecordOffline() => recognitionModule?.StopRecordOffline();

    /// <summary>Raised with the recognized text each time the server returns a result.</summary>
    public static Action<string> onSpeechRecognition;

    // NOTE(review): the class name keeps its original misspelling ("VAduio") because external
    // code may reference it; renaming would be a breaking interface change.
    public class VAduio_RecognitionModule
    {
        public const string editorAddress = "ws://127.0.0.1:10095";
        public const string androidAddress = "ws://192.168.1.3:10095"; // Replace with the recognition server's LAN IP when testing on device.
        public string address;
        public WebSocket socket;

        /// <summary>
        /// Opens the WebSocket to the recognition server and picks the first
        /// available microphone. Throws when no microphone is present.
        /// </summary>
        public void Init()
        {
#if UNITY_EDITOR
            address = editorAddress;
#else
            address = androidAddress;
#endif
            socket = new WebSocket(address);
            socket.OnMessage += OnMessage;
            socket.ConnectAsync();

            microphoneDevice = Microphone.devices.Length > 0 ? Microphone.devices[0] : null;
            // TODO: this should asynchronously await the microphone-permission request on device.
            if (string.IsNullOrEmpty(microphoneDevice)) throw new InvalidOperationException("没有找到麦克风！");
        }

        // Optional "recording started" cue. Currently never assigned (the Addressables load
        // was disabled), so StartRecordOffline() skips the cue until one is provided.
        private AudioClip notify;
        public string microphoneDevice;   // First microphone reported by Unity, or null when none exists.
        public AudioClip offlineClip;     // Buffer holding the most recent offline recording.

        /// <summary>Starts a 10-second, 16 kHz microphone capture for offline recognition.</summary>
        public void StartRecordOffline()
        {
            offlineClip = Microphone.Start(microphoneDevice, false, 10, 16000);
            // notify was previously passed unconditionally while never being loaded,
            // which handed a null clip to PlayOneShot.
            if (notify != null) VAudio.PlayAudio(notify);
        }

        /// <summary>
        /// Stops the capture and streams the recording to the server using the
        /// offline protocol: config JSON, raw WAV bytes, then an end-of-speech marker.
        /// </summary>
        public void StopRecordOffline()
        {
            Microphone.End(microphoneDevice);
            if (offlineClip == null)
            {
                // Stop without a matching Start used to throw NullReferenceException here.
                Debug.LogWarning("VAudio: StopRecordOffline called without a prior StartRecordOffline.");
                return;
            }
            byte[] wavData = offlineClip.ToBytes();

            socket.SendAsync("{\"mode\":\"offline\",\"wav_name\":\"test.wav\",\"is_speaking\":true,\"hotwords\":\"\",\"itn\":true}");
            socket.SendAsync(wavData);
            socket.SendAsync("{\"is_speaking\": false}");
        }

        /// <summary>Unsubscribes and closes the WebSocket; safe to call when never connected.</summary>
        public void Close()
        {
            if (socket != null)
            {
                socket.OnMessage -= OnMessage;
                socket.CloseAsync();
                socket = null;
            }
        }

        private void OnMessage(object sender, MessageEventArgs e)
        {
            RecData data = JsonUtility.FromJson<RecData>(e.Data);
            onSpeechRecognition?.Invoke(data.text);
        }

        // DTO matching the recognition server's JSON result payload.
        [Serializable]
        public class RecData
        {
            public bool is_final;
            public string mode;
            public List<StampSent> stamp_sents;
            public string text;
            public string timestamp;
            public string wav_name;
        }

        // Per-sentence timestamp segment within a recognition result.
        [Serializable]
        public class StampSent
        {
            public int end;
            public string punc;
            public int start;
            public string text_seg;
            public List<List<int>> ts_list;
        }
    }

}

public static class VAudioExtension
{
    /// <summary>
    /// Encodes the clip's sample data as a complete in-memory WAV (RIFF/PCM) file:
    /// 44-byte header followed by little-endian PCM frames.
    /// </summary>
    /// <param name="clip">Clip whose samples are read via <c>AudioClip.GetData</c>.</param>
    /// <returns>The WAV file contents, ready to write to disk or send over the wire.</returns>
    public static byte[] ToBytes(this AudioClip clip)
    {
        // This version always emits 16-bit PCM (the 24/32-bit branches are kept for completeness).
        int res = 16;
        int size = res / 8; // bytes per sample

        // Pull the raw float samples (interleaved by channel) out of the clip.
        float[] samples = new float[clip.samples * clip.channels];
        clip.GetData(samples, 0);

        // Final size is known exactly: 44-byte header + sample data. Presize to avoid regrowth.
        List<byte> wavFile = new List<byte>(44 + samples.Length * size);

        // ---- RIFF header ----
        wavFile.AddRange(new byte[] { (byte)'R', (byte)'I', (byte)'F', (byte)'F' });            // "RIFF"
        wavFile.AddRange(System.BitConverter.GetBytes(samples.Length * size + 44 - 8));         // ChunkSize = file size - 8
        wavFile.AddRange(new byte[] { (byte)'W', (byte)'A', (byte)'V', (byte)'E' });            // "WAVE"

        // ---- fmt subchunk ----
        wavFile.AddRange(new byte[] { (byte)'f', (byte)'m', (byte)'t', (byte)' ' });            // "fmt "
        wavFile.AddRange(System.BitConverter.GetBytes(16));                                     // Subchunk1Size (16 for PCM)
        wavFile.AddRange(System.BitConverter.GetBytes((ushort)1));                              // AudioFormat (1 = PCM)
        wavFile.AddRange(System.BitConverter.GetBytes((ushort)clip.channels));                  // NumChannels
        wavFile.AddRange(System.BitConverter.GetBytes(clip.frequency));                         // SampleRate
        wavFile.AddRange(System.BitConverter.GetBytes(clip.frequency * clip.channels * size));  // ByteRate
        wavFile.AddRange(System.BitConverter.GetBytes((ushort)(clip.channels * size)));         // BlockAlign
        wavFile.AddRange(System.BitConverter.GetBytes((ushort)res));                            // BitsPerSample

        // ---- data subchunk ----
        wavFile.AddRange(new byte[] { (byte)'d', (byte)'a', (byte)'t', (byte)'a' });            // "data"
        wavFile.AddRange(System.BitConverter.GetBytes(samples.Length * size));                  // Subchunk2Size

        // Convert each float sample to a little-endian PCM integer. Samples are clamped to
        // [-1, 1] first: microphone input can exceed full scale, and an unchecked cast would
        // wrap around (audible as a loud click) instead of clipping.
        for (int i = 0; i < samples.Length; i++)
        {
            float s = Mathf.Clamp(samples[i], -1f, 1f);
            switch (res)
            {
                case 16:
                    short sample16 = (short)(s * 32767f);
                    wavFile.AddRange(System.BitConverter.GetBytes(sample16));
                    break;
                case 24:
                    int sample24 = Mathf.FloorToInt(s * 8388607);
                    byte[] data = System.BitConverter.GetBytes(sample24);
                    wavFile.AddRange(new byte[] { data[0], data[1], data[2] });
                    break;
                case 32:
                    // Multiply in double: 2147483647f rounds up to 2^31 as a float, so the old
                    // float multiply + unchecked int cast overflowed to int.MinValue at s == 1.0.
                    int sample32 = (int)(s * 2147483647.0);
                    wavFile.AddRange(System.BitConverter.GetBytes(sample32));
                    break;
            }
        }

        return wavFile.ToArray();
    }
}


