using System;
using System.Collections.Generic;
using Microsoft.ML.OnnxRuntime;
using NumpyDotNet;
using SentencePiece.NET;
using TorchSharp;

namespace SenseVoiceSharp
{
    /// <summary>
    /// Paths to the artifacts required by <c>SenseVoiceTool</c>.
    /// Properties default to <see cref="string.Empty"/> rather than <c>null</c>
    /// so partially-initialized configs fail fast on file lookup, not with an NRE.
    /// </summary>
    public class ModelConfig
    {
        /// <summary>Path to the SenseVoice ONNX model file.</summary>
        public string ModelPath { get; set; } = string.Empty;
        /// <summary>Path to the .npy embedding matrix used for the query vectors.</summary>
        public string EmbeddingPath { get; set; } = string.Empty;
        /// <summary>Path to the SentencePiece BPE model.</summary>
        public string BpePath { get; set; } = string.Empty;
        /// <summary>Path to the CMVN statistics file consumed by the wav front-end.</summary>
        public string CmvnPath { get; set; } = string.Empty;
    }
    /// <summary>
    /// Thin wrapper around the SenseVoice ONNX encoder: loads audio, extracts
    /// features, runs the session and greedily decodes the token sequence with
    /// SentencePiece. Owns native resources (ONNX session, embedding tensor);
    /// call <see cref="Dispose"/> when finished.
    /// </summary>
    public class SenseVoiceTool : IDisposable
    {
        private readonly InferenceSession _session;
        private readonly torch.Tensor _embedding;
        private readonly SentencePieceProcessor _processor;
        private readonly WavFrontend _front;
        private bool _disposed;

        /// <summary>
        /// Opens the ONNX session and loads the query-embedding matrix (.npy),
        /// the SentencePiece BPE model and the CMVN front-end statistics.
        /// </summary>
        /// <param name="config">Paths to the model artifacts; all four must exist.</param>
        public SenseVoiceTool(ModelConfig config)
        {
            // SessionOptions owns native memory; the session keeps its own copy,
            // so the options can be disposed once construction completes.
            using var options = new SessionOptions();
            options.LogSeverityLevel = OrtLoggingLevel.ORT_LOGGING_LEVEL_FATAL;
            options.LogVerbosityLevel = 4;
            //options.AppendExecutionProvider_CUDA();
            _session = new InferenceSession(config.ModelPath, options);
            var embedding = np.load(config.EmbeddingPath);
            // Rebuild the 2-D embedding as a torch tensor: (numQueries, embedDim).
            _embedding = torch.tensor(embedding.AsFloatArray(), new long[] { embedding.shape[0], embedding.shape[1] });
            _processor = new SentencePieceProcessor();
            _processor.Load(config.BpePath);
            _front = new WavFrontend(config.CmvnPath);
        }

        /// <summary>
        /// Reads a WAV file, resamples to 16 kHz when necessary, and downmixes
        /// multi-channel audio to mono by averaging the channels.
        /// </summary>
        /// <param name="audioPath">Path to the WAV file.</param>
        /// <returns>1-D float tensor of mono samples at 16 kHz.</returns>
        public torch.Tensor LoadAudio(string audioPath)
        {
            var (samples, sampleRate, channels) = SenseVoiceHelpers.ReadWav(audioPath);
            if (sampleRate != 16000)
            {
                samples = SenseVoiceHelpers.ResampleAudio(
                    originalSamples: samples,
                    originalSampleRate: sampleRate,
                    targetSampleRate: 16000,
                    channels: channels
                );
            }
            // Interleaved (N*channels,) -> (N, channels) -> (channels, N),
            // then mean over dim 0 collapses the channels to mono.
            var waveform = torch.tensor(samples).reshape(-1, channels).t().mean(new long[] { 0 }).reshape(-1);
            return waveform;
        }

        /// <summary>
        /// Runs the SenseVoice encoder on a feature tensor and greedily decodes the output.
        /// </summary>
        /// <param name="speech">Batched feature tensor; assumed (1, frames, featDim) with
        /// featDim equal to the embedding width — TODO(review): confirm against WavFrontend.</param>
        /// <param name="language">Row index into the embedding matrix selecting the language query.</param>
        /// <returns>Decoded text, still containing SenseVoice marker tokens.</returns>
        public string Inference(torch.Tensor speech, int language)
        {
            // Prompt vectors prepended to the features along the frame axis:
            // language query, event/emotion queries (presumably embedding rows 1-2)
            // and the text-norm query (row 14) — indices follow the reference model.
            var languageQuery = _embedding[language].reshape(1, 1, -1);
            var indices = torch.tensor(new long[] { 1, 2 }, dtype: torch.@long);
            var eventEmoQuery = _embedding.index_select(0, indices).reshape(1, 2, -1);
            var textNormQuery = _embedding[torch.TensorIndex.Single(14)].reshape(1, 1, -1);
            var inputContent = torch.concatenate(new List<torch.Tensor> { languageQuery, eventEmoQuery, textNormQuery, speech }, 1);
            var inputLength = (int)inputContent.shape[1];

            // OrtValues, RunOptions and the Run() result collection all own native
            // buffers — dispose them deterministically instead of leaking per call.
            using var speechValue = OrtValue.CreateTensorValueFromMemory(
                inputContent.data<float>().ToArray(), new long[] { inputContent.shape[0], inputContent.shape[1], inputContent.shape[2] });
            using var speechLengthValue = OrtValue.CreateTensorValueFromMemory(new Int64[] { inputLength }, new long[] { 1 });
            var inputs = new Dictionary<string, OrtValue>
            {
                { "speech", speechValue },
                { "speech_lengths", speechLengthValue }
            };
            using var runOptions = new RunOptions();
            using var results = _session.Run(runOptions, inputs, _session.OutputNames);
            var encoderOut = results[0].GetTensorDataAsSpan<float>().ToArray();
            var encoderOutArr = torch.tensor(encoderOut).reshape(inputContent.shape[0], inputContent.shape[1], -1)[0];

            // Greedy CTC-style decode: argmax per frame, collapse consecutive
            // repeats, then drop blank tokens (id 0).
            var argmaxArr = encoderOutArr.argmax(-1);
            var arr_next = argmaxArr[1..];    // from index 1 to the end
            var arr_prev = argmaxArr[..^1];   // from the start to the second-to-last element
            var diff = arr_next != arr_prev;
            var initial = torch.tensor(new[] { true }, dtype: torch.@bool);
            var mask = torch.cat(new[] { initial, diff }, dim: 0);
            var ret = argmaxArr[mask];
            ret = ret[ret != 0].to(torch.int32);
            var hypos = ret.data<int>().ToArray();
            var text = _processor.Decode(hypos);
            return text;
        }

        /// <summary>
        /// Full pipeline: load audio, extract CMVN features, run inference with
        /// language index 0 and post-process the raw model output.
        /// </summary>
        /// <param name="audioPath">Path to the WAV file to transcribe.</param>
        /// <returns>Post-processed transcription text.</returns>
        public string Transcribe(string audioPath)
        {
            var waveform = LoadAudio(audioPath);
            var audioFeats = _front.GetFeatures(waveform);
            // [Null, Ellipsis] prepends a batch dimension to the feature tensor.
            var asrResult = Inference(audioFeats[torch.TensorIndex.Null, torch.TensorIndex.Ellipsis], 0);
            var result = SenseVoiceHelpers.ResRe(asrResult);
            return result;
        }

        /// <summary>Releases the native ONNX session and the embedding tensor.</summary>
        public void Dispose()
        {
            if (_disposed)
            {
                return;
            }
            _disposed = true;
            _session.Dispose();
            _embedding.Dispose();
            // NOTE(review): SentencePieceProcessor/WavFrontend are not disposed here —
            // confirm whether those types own native resources that need cleanup.
            GC.SuppressFinalize(this);
        }
    }
}
