using Concentus;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net.WebSockets;
using System.Text;
using System.Text.Json;
using System.Text.Json.Nodes;
using System.Threading;
using System.Threading.Tasks;
using WebRtcVadSharp;

namespace ConsoleSrv.Utils
{
    /// <summary>
    /// Streaming speech-recognition session against the Alibaba Cloud DashScope
    /// real-time ASR WebSocket endpoint (model: paraformer-realtime-v2).
    ///
    /// Feed raw Opus packets to <see cref="ProcessAndRecognizeAudioAsync"/>; the
    /// session lazily connects, forwards frames that pass the local energy/VAD
    /// gate, and finishes the task after more than 5 s of non-speech. Server
    /// events are surfaced through <see cref="OnRecognized"/>.
    /// </summary>
    public class AliASRSession
    {
        private ClientWebSocket _webSocket = new();
        private CancellationTokenSource _cts = new();
        private string _taskId = string.Empty;

        /// <summary>
        /// Raised for every server event: (event name, recognized text, is final).
        /// </summary>
        public event Action<string, string, bool>? OnRecognized;

        /// <summary>
        /// Connects to the ASR service and starts a new recognition task.
        /// No-op while a task is already running; safe to call per audio packet.
        /// </summary>
        public async Task StartAsync()
        {
            if (_isStarted) return;
            _taskId = Guid.NewGuid().ToString("N"); // 32-char random task id

            // Recreate socket and token so the session can be restarted after a
            // previous FinishAsync() cancelled the token and consumed the socket
            // (a ClientWebSocket cannot be reconnected once used).
            _cts.Cancel();
            _cts.Dispose();
            _cts = new CancellationTokenSource();
            _webSocket.Dispose();
            _webSocket = new ClientWebSocket();

            // SECURITY NOTE(review): API key is hard-coded in source; move it to
            // secure configuration (env var / secret store) before shipping.
            _webSocket.Options.SetRequestHeader("Authorization", "sk-b15xxxxxxxx");
            _webSocket.Options.SetRequestHeader("X-DashScope-DataInspection", "enable");

            Console.WriteLine($"开始连接到阿里云ASR服务，任务ID: {_taskId}");
            await _webSocket.ConnectAsync(new Uri("wss://dashscope.aliyuncs.com/api-ws/v1/inference/"), _cts.Token);
            _ = Task.Run(() => ReceiveLoopAsync());

            _isStarted = true;
            _isSendAudio = true;
            _isFinish = false;
            await SendAsync(GenerateRunTaskJson(_taskId));
        }

        /// <summary>
        /// Forwards one binary PCM chunk to the service.
        /// Silently ignored when the session is not currently accepting audio.
        /// </summary>
        public async Task SendAudioAsync(byte[] data)
        {
            if (!_isSendAudio) return;
            await _webSocket.SendAsync(new ArraySegment<byte>(data), WebSocketMessageType.Binary, true, _cts.Token);
        }

        /// <summary>
        /// Tells the service the audio stream is complete, then tears the session down.
        /// </summary>
        public async Task FinishAsync()
        {
            await SendAsync(GenerateFinishTaskJson(_taskId));
            // NOTE(review): cancelling immediately may race the server's final
            // "task-finished" event; kept to preserve the original shutdown contract.
            _cts.Cancel();
        }

        // Sends one UTF-8 text frame (a DashScope control message).
        private async Task SendAsync(string message)
        {
            var buffer = Encoding.UTF8.GetBytes(message);
            await _webSocket.SendAsync(new ArraySegment<byte>(buffer), WebSocketMessageType.Text, true, _cts.Token);
        }

        /// <summary>
        /// Background loop: reads complete text messages from the socket and
        /// dispatches DashScope task events to <see cref="OnRecognized"/>.
        /// </summary>
        private async Task ReceiveLoopAsync()
        {
            var buffer = new byte[4096];
            try
            {
                while (!_cts.IsCancellationRequested && _webSocket.State == WebSocketState.Open)
                {
                    // BUG FIX: a logical message may span several WebSocket frames
                    // (or exceed 4096 bytes); accumulate until EndOfMessage instead
                    // of parsing each fragment as standalone JSON.
                    var frame = new List<byte>();
                    WebSocketReceiveResult result;
                    do
                    {
                        result = await _webSocket.ReceiveAsync(new ArraySegment<byte>(buffer), _cts.Token);
                        if (result.MessageType == WebSocketMessageType.Close) return;
                        frame.AddRange(buffer.Take(result.Count));
                    } while (!result.EndOfMessage);

                    var jsonStr = Encoding.UTF8.GetString(frame.ToArray());
                    JsonNode? msg = JsonNode.Parse(jsonStr);
                    var evt = msg?["header"]?["event"]?.GetValue<string>();

                    switch (evt)
                    {
                        case "task-started":
                            Console.WriteLine("任务开启成功");
                            OnRecognized?.Invoke(evt, "", false);
                            break;
                        case "result-generated":
                            var text = msg?["payload"]?["output"]?["sentence"]?["text"]?.GetValue<string>() ?? "";
                            Console.WriteLine($"识别结果：{text}");
                            OnRecognized?.Invoke(evt, text, false);
                            break;
                        case "task-finished":
                            // BUG FIX: the original called Console.ReadLine() here,
                            // blocking the receive loop forever.
                            var finalText = msg?["payload"]?["output"]?["sentence"]?["text"]?.GetValue<string>() ?? "";
                            Console.WriteLine("任务完成");
                            OnRecognized?.Invoke(evt, finalText, true);
                            break;
                        case "task-failed":
                            OnRecognized?.Invoke(evt, "", false);
                            Console.WriteLine($"任务失败：{msg?["header"]?["error_message"]?.GetValue<string>()}");
                            break;
                    }
                }
            }
            catch (OperationCanceledException)
            {
                // Normal shutdown: FinishAsync()/StartAsync() cancelled the token.
            }
            catch (WebSocketException ex)
            {
                // This runs as a fire-and-forget task; never let it throw unobserved.
                Console.WriteLine($"ASR receive loop aborted: {ex.Message}");
            }
        }

        // Builds the "run-task" control message that opens a recognition task.
        private static string GenerateRunTaskJson(string taskId) => JsonSerializer.Serialize(new
        {
            header = new
            {
                action = "run-task",
                task_id = taskId,
                streaming = "duplex"
            },
            payload = new
            {
                task_group = "audio",
                task = "asr",
                function = "recognition",
                model = "paraformer-realtime-v2",
                parameters = new
                {
                    format = "pcm", // supported: pcm, wav, mp3, opus, speex, aac, amr
                    sample_rate = 16000,
                    vocabulary_id = "vocab-xxx-24ee19fa8cfb4d52902170a0xxxxxxxx",
                    disfluency_removal_enabled = false
                },
                // When the hot-word feature is unused, do not pass "resources".
                input = new { }
            }
        });

        // Builds the "finish-task" control message that closes the audio stream.
        private static string GenerateFinishTaskJson(string taskId) => JsonSerializer.Serialize(new
        {
            header = new
            {
                action = "finish-task",
                task_id = taskId,
                streaming = "duplex"
            },
            payload = new
            {
                input = new { }
            }
        });

        // --- session state ---
        private static DateTime _lastVoiceTime = DateTime.UtcNow; // last packet that passed the speech gate
        private bool _isStarted = false;   // a run-task is active
        private bool _isSendAudio = false; // audio frames are being forwarded
        private bool _isFinish = false;    // silence triggered finish-task (informational)

        private static readonly object _lock = new(); // guards the gate/finish decision

        // --- voice-activity detection state ---
        private static double globalNoiseEnergy = -1;                // running background-noise RMS estimate
        private static DateTime lastNoiseUpdate = DateTime.MinValue; // last time the noise floor was refreshed
        private static readonly object lockObj = new();              // guards the noise estimate

        /// <summary>
        /// Per-packet entry point: decodes an Opus packet, runs the speech gate,
        /// lazily (re)starts the ASR task, streams speech frames, and finishes the
        /// task after more than 5 s of non-speech.
        /// </summary>
        /// <param name="buffer">Buffer holding one raw Opus packet.</param>
        /// <param name="result_Count">Number of valid bytes in <paramref name="buffer"/>.</param>
        public async Task ProcessAndRecognizeAudioAsync(byte[] buffer, int result_Count)
        {
            // BUG FIX: was `async void` — exceptions were unobservable to callers.
            short[] shortPcm = OpusToShort(buffer, result_Count);
            if (shortPcm == null || shortPcm.Length == 0) return;
            bool isHuman = IsHumanSpeech(shortPcm);

            await StartAsync();

            lock (_lock)
            {
                Console.WriteLine($"人声检测: {isHuman},是否SendAudio: {_isSendAudio},间隔{(DateTime.UtcNow - _lastVoiceTime).TotalMilliseconds}");
                if (isHuman && _isSendAudio)
                {
                    byte[] finalSegment = ShortArrayToByteArray(shortPcm);
                    // Fire-and-forget so the synchronous lock is never held across an await.
                    _ = Task.Run(async () =>
                    {
                        Console.WriteLine($"发送Audio");
                        await SendAudioAsync(finalSegment);
                    });

                    _lastVoiceTime = DateTime.UtcNow;
                }
                else if (_isSendAudio && (DateTime.UtcNow - _lastVoiceTime).TotalMilliseconds > 5000)
                {
                    // >5 s of silence: close out the current task; the next packet
                    // containing speech will open a fresh one via StartAsync().
                    _isStarted = false;
                    _isSendAudio = false;
                    _isFinish = true;
                    _ = Task.Run(async () =>
                    {
                        Console.WriteLine($"发送FinishAsync");
                        await FinishAsync();
                    });
                }
            }
        }

        /// <summary>
        /// Decides whether a PCM frame contains human speech, combining an adaptive
        /// energy threshold (noise floor x <paramref name="noiseFactor"/>) with WebRTC VAD.
        /// </summary>
        /// <param name="pcmBuffer">16-bit PCM samples.</param>
        /// <param name="sampleRate">Sample rate in Hz (default 16 kHz).</param>
        /// <param name="noiseFactor">Multiplier applied to the noise floor to form the threshold.</param>
        /// <param name="noiseUpdateIntervalSec">Minimum seconds between noise-floor refreshes.</param>
        private static bool IsHumanSpeech(short[] pcmBuffer, int sampleRate = 16000, double noiseFactor = 4, int noiseUpdateIntervalSec = 5)
        {
            if (pcmBuffer == null || pcmBuffer.Length == 0) return false;

            // 1. RMS energy of the current frame.
            double energy = 0;
            foreach (short sample in pcmBuffer)
            {
                energy += sample * sample;
            }
            energy = Math.Sqrt(energy / pcmBuffer.Length);

            // 2. Periodically refresh the global background-noise estimate.
            //    (BUG FIX: was DateTime.Now, inconsistent with UtcNow elsewhere.)
            lock (lockObj)
            {
                if (globalNoiseEnergy < 0 || (DateTime.UtcNow - lastNoiseUpdate).TotalSeconds > noiseUpdateIntervalSec)
                {
                    globalNoiseEnergy = ComputeBackGroundNoise(pcmBuffer, sampleRate);
                    lastNoiseUpdate = DateTime.UtcNow;
                    Console.WriteLine($"[噪声更新] 新的背景噪声: {globalNoiseEnergy:F1}");
                }
            }

            // 3. Dynamic threshold relative to the noise floor.
            double dynamicThreshold = globalNoiseEnergy * noiseFactor;

            // 4. Speech = loud enough AND VAD-positive.
            bool energyOver = energy > dynamicThreshold;
            bool containSpeech = DoesFrameContainSpeech(pcmBuffer);

            Console.WriteLine($"音量: {energy:F1}".PadRight(20) + $"噪音: {globalNoiseEnergy:F1}".PadRight(20) + $"阈值: {dynamicThreshold:F1}".PadRight(20) +
                "超阈:".PadRight(4) + (energyOver ? "√" : "_") + "人声:".PadRight(4) + (containSpeech ? "√" : "_"));
            return energyOver && containSpeech;
        }

        /// <summary>
        /// Estimates the background-noise RMS: splits the buffer into
        /// <paramref name="segmentMs"/> segments and averages the quietest
        /// <paramref name="minSegments"/> of them.
        /// </summary>
        private static double ComputeBackGroundNoise(short[] pcmBuffer, int sampleRate = 16000, int segmentMs = 20, int minSegments = 5)
        {
            if (pcmBuffer == null || pcmBuffer.Length == 0) return 0;

            int samplesPerSegment = sampleRate * segmentMs / 1000; // samples per segment
            int totalSegments = pcmBuffer.Length / samplesPerSegment;
            // BUG FIX: a buffer shorter than one segment produced 0/0 = NaN below.
            if (totalSegments == 0) return 0;

            List<double> segmentEnergies = new();

            // RMS energy of every segment.
            for (int i = 0; i < totalSegments; i++)
            {
                double energy = 0;
                for (int j = 0; j < samplesPerSegment; j++)
                {
                    int index = i * samplesPerSegment + j;
                    if (index >= pcmBuffer.Length) break;
                    energy += pcmBuffer[index] * pcmBuffer[index];
                }
                energy = Math.Sqrt(energy / samplesPerSegment);
                segmentEnergies.Add(energy);
            }

            // Average the quietest segments as the noise-floor estimate.
            segmentEnergies.Sort(); // ascending
            int noiseSegmentCount = Math.Min(minSegments, segmentEnergies.Count);

            double noiseEnergy = 0;
            for (int i = 0; i < noiseSegmentCount; i++)
            {
                noiseEnergy += segmentEnergies[i];
            }

            return noiseEnergy / noiseSegmentCount;
        }

        /// <summary>
        /// Runs WebRTC VAD over 20 ms sub-frames; true as soon as any sub-frame is
        /// classified as speech.
        /// </summary>
        private static bool DoesFrameContainSpeech(short[] pcmBuffer, int sampleRate = 16000, int frameMs = 20)
        {
            int samplesPerFrame = sampleRate * frameMs / 1000;
            using var vad = new WebRtcVad();
            // Least sensitive mode: avoids classifying background noise/silence as speech.
            vad.OperatingMode = OperatingMode.VeryAggressive;
            for (int i = 0; i + samplesPerFrame <= pcmBuffer.Length; i += samplesPerFrame)
            {
                byte[] pcmFrame = new byte[samplesPerFrame * sizeof(short)]; // 16-bit PCM
                Buffer.BlockCopy(pcmBuffer, i * sizeof(short), pcmFrame, 0, pcmFrame.Length);

                if (vad.HasSpeech(pcmFrame, SampleRate.Is16kHz, FrameLength.Is20ms))
                {
                    return true;
                }
            }
            return false;
        }

        // Reinterprets 16-bit samples as a little-endian byte stream for the wire.
        private static byte[] ShortArrayToByteArray(short[] shorts)
        {
            byte[] bytes = new byte[shorts.Length * 2];
            Buffer.BlockCopy(shorts, 0, bytes, 0, bytes.Length);
            return bytes;
        }

        /// <summary>
        /// Decodes a single Opus packet into 16 kHz mono 16-bit PCM.
        /// </summary>
        /// <param name="buffer">Buffer holding the Opus packet.</param>
        /// <param name="result_Count">Number of valid bytes in <paramref name="buffer"/>.</param>
        /// <returns>Decoded PCM samples, trimmed to the actual decoded length.</returns>
        public short[] OpusToShort(byte[] buffer, int result_Count)
        {
            const int sampleRate = 16000;                 // 16 kHz
            const int channels = 1;                       // mono
            const int frameSize = sampleRate / 1000 * 60; // 60 ms frame = 960 samples

            using IOpusDecoder decoder = OpusCodecFactory.CreateDecoder(sampleRate, channels, null);
            byte[] opusFrame = buffer.Take(result_Count).ToArray();
            short[] pcmBuffer = new short[frameSize * channels];
            int decodedSamples = decoder.Decode(opusFrame.AsSpan(), pcmBuffer.AsSpan(), frameSize, false);
            // BUG FIX: the original always returned the full 960-sample buffer even
            // when the decoder produced fewer samples, padding the stream with silence.
            return decodedSamples == frameSize
                ? pcmBuffer
                : pcmBuffer.Take(decodedSamples * channels).ToArray();
        }
    }

 





}

