﻿// AiViewModel.cs
using CommunityToolkit.Mvvm.ComponentModel;
using CommunityToolkit.Mvvm.Input;
using System.Collections.ObjectModel;
using System.Net.Http;
using System.Net.Http.Json;
using System.Speech.Synthesis;
using System.Speech.Recognition;
using System.Threading.Tasks;
using NAudio.Wave;
using System.Windows.Threading;
using System.IO;
using System.Net.Http.Headers;
using System.Text;
using Microsoft.CognitiveServices.Speech.Audio;
using System.Diagnostics;
using Microsoft.CognitiveServices.Speech.Speaker;
using Windows_AI_Assistant.Helpers;
using static Betalgo.Ranul.OpenAI.ObjectModels.SharedModels.IOpenAIModels;
using System.ComponentModel;
using Windows_AI_Assistant.Services;
using System.Diagnostics.CodeAnalysis;
using Windows_AI_Assistant.Models;
using static System.Windows.Forms.VisualStyles.VisualStyleElement.Window;
using Microsoft.CognitiveServices.Speech.Dialog;
using Betalgo.Ranul.OpenAI.ObjectModels.RequestModels;
using Betalgo.Ranul.OpenAI.Managers;
using Betalgo.Ranul.OpenAI;
using Microsoft.Windows.Themes;
using Wpf.Ui.Appearance;

namespace Windows_AI_Assistant.ViewModels.Pages
{
    public partial class AiViewModel : ObservableObject
    {
        // Text typed by the user for the "test TTS" button.
        [ObservableProperty]
        private string? testText;

        // Latest speech-to-text result of the current user turn.
        [ObservableProperty]
        private string? recognizedText;

        // Streaming AI reply shown in the UI.
        [ObservableProperty]
        private string? aiResponse;

        // Status line shown in the UI.
        [ObservableProperty]
        private string? status;

        // Installed TTS voice packages (bound to the voice selector).
        [ObservableProperty]
        private ObservableCollection<VoiceItem>? voices;

        // Currently selected voice package.
        [ObservableProperty]
        private VoiceItem? selectedVoice;

        // Wake words, "|"-separated (persisted in Config.ini).
        [ObservableProperty]
        private string? wakeWords;

        // Path of the Config.ini file next to the executable.
        private static readonly string IniPath = System.IO.Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "Config.ini");

        // Ini file reader/writer helper.
        private readonly IniFiles Ini = new(IniPath);

        // HTTP client and base address of the local Flask API
        // (voiceprint identification + transcription endpoints).
        private readonly HttpClient _httpClient = new();
        private readonly string _flaskUrl = "http://127.0.0.1:8080"; // Flask API base URL

        // Silence-detection state for the listening loop (see StartRecText).
        private Stopwatch? _stopwatch;
        private Timer? _timer;
        private bool _isListening = false;

        // Dialog-log database service.
        private readonly DialogContentService _dialogContentService = new();
        // Fields of the dialog turn currently being persisted (see AddDialogContent).
        private string? _dialogId;
        private string? _userId;
        private string? _userName;
        private string? _role;
        private string? _content;

        // Dialog rows loaded from the database (not populated in this file).
        public ObservableCollection<DialogContent>? DialogContents;

        // Wake-word (command) recognizer.
        private SpeechRecognitionEngine? _recognizerCmd;
        // Free-dictation (speech-to-text) recognizer.
        private SpeechRecognitionEngine? _recognizerText;
        // Text-to-speech synthesizer.
        private SpeechSynthesizer? _synthesizer;

        // Selected voice package name (persisted in Config.ini).
        private string? _voiceName;
        // OpenAI-compatible API settings loaded from Config.ini.
        private string? _apiUrl;
        private string? _apiKey;
        private string? _apiModel;
        private string? _apiPrompt;

        // AI chat service.
        AIService _aiService = new();
        // Conversation history sent with every chat request.
        IList<ChatMessage> _aiRequestMessages = [];


        // Pending TTS utterances.
        private Queue<string> _speechQueue = new();
        // "Currently speaking" lock for the speech queue.
        private bool _isSpeaking = false;

        // Pending audio files awaiting transcription, plus its lock.
        private Queue<string> _transcriptionQueue = new();
        private bool _isTranscribing = false;


        // Set once the constructor has finished initialization.
        private bool _isInitialized = false;

        /// <summary>
        /// Sets up voices, ini-backed settings and the speech recognizers.
        /// </summary>
        public AiViewModel()
        {
            // The original guarded on !_isInitialized, but an instance field is
            // always false when the constructor runs, so the guard was dead code.
            InitializeVoices();
            InitlizeIniSetting();
            InitializeRecognizer();
            _isInitialized = true;
        }

        /// <summary>
        /// Creates the speech synthesizer, routes it to the default audio
        /// device, and fills <see cref="Voices"/> with every installed voice.
        /// </summary>
        private void InitializeVoices()
        {
            _synthesizer = new SpeechSynthesizer();
            _synthesizer.SetOutputToDefaultAudioDevice();
            // Completion callback advances the speech queue.
            _synthesizer.SpeakCompleted += Synthesizer_SpeakCompleted;

            // Enumerate every installed voice into the bindable list.
            Voices = new ObservableCollection<VoiceItem>();
            foreach (var installed in _synthesizer.GetInstalledVoices())
            {
                VoiceInfo details = installed.VoiceInfo;
                Voices.Add(new VoiceItem
                {
                    Name = details.Name,
                    Culture = details.Culture.ToString(),
                    Gender = details.Gender.ToString(),
                    DisplayName = $"{details.Name} ({details.Culture})"
                });
            }

            // Preferred default; SetVoice falls back to the first installed
            // voice when this name is not present on the machine.
            SetVoice("Microsoft Zira Desktop");
        }

        /// <summary>
        /// Fired when the synthesizer finishes an utterance: releases the
        /// speaking lock and lets the queue start the next one.
        /// </summary>
        /// <param name="sender">The synthesizer raising the event.</param>
        /// <param name="args">Completion details (unused).</param>
        private void Synthesizer_SpeakCompleted(object sender, SpeakCompletedEventArgs args)
        {
            _isSpeaking = false;
            ProcessSpeechQueue();
        }

        /// <summary>
        /// Persists the current dialog turn to the database, but only when
        /// every required field of the turn has been filled in.
        /// </summary>
        private void AddDialogContent()
        {
            bool hasAllFields = !string.IsNullOrEmpty(_dialogId)
                && !string.IsNullOrEmpty(_userId)
                && !string.IsNullOrEmpty(_userName)
                && !string.IsNullOrEmpty(_role)
                && !string.IsNullOrEmpty(_content);
            if (!hasAllFields)
            {
                return;
            }

            var entry = new DialogContent
            {
                DialogId = _dialogId,
                UserId = _userId,
                UserName = _userName,
                Role = _role,
                Content = _content,
                Timestamp = DateTime.Now
            };
            _dialogContentService.AddDialogContent(entry);
        }

        /// <summary>
        /// Loads voice, API and wake-word settings from Config.ini.
        /// </summary>
        private void InitlizeIniSetting()
        {
            // Make sure the ini file exists before reading from it.
            Ini.FindAndCreate(IniPath);

            // The voice selection is applied immediately after it is read.
            _voiceName = Ini.IniReadValue("VoiceSetting", "VOICE_NAME");
            SetVoice(_voiceName);

            // OpenAI-compatible API configuration.
            _apiUrl = Ini.IniReadValue("AISetting", "API_URL");
            _apiKey = Ini.IniReadValue("AISetting", "API_KEY");
            _apiModel = Ini.IniReadValue("AISetting", "API_MODEL");
            _apiPrompt = Ini.IniReadValue("AISetting", "API_PROMPT");

            // "|"-separated wake words used by the command recognizer.
            WakeWords = Ini.IniReadValue("VoiceSetting", "WAKE_WORDS");
        }

        /// <summary>
        /// Selects the named voice package, falling back to the first
        /// installed voice when the name is not found.
        /// </summary>
        /// <param name="voiceName">Voice package name; may be null/unknown
        /// (e.g. read from an empty ini file), in which case the default voice
        /// is selected. Widened to <c>string?</c> because callers pass the
        /// nullable <c>_voiceName</c>.</param>
        public void SetVoice(string? voiceName)
        {
            // Guard: Voices is nullable and empty before InitializeVoices runs;
            // the original dereferenced it unconditionally.
            if (Voices is null || Voices.Count == 0)
            {
                Status = "The specified voice package was not found, the default voice package has been selected.";
                return;
            }

            SelectedVoice = Voices.FirstOrDefault(v => v.Name == voiceName);

            if (SelectedVoice == null)
            {
                // Unknown name: report it and fall back to the first voice.
                Status = "The specified voice package was not found, the default voice package has been selected.";
                SelectedVoice = Voices[0];
            }
            else
            {
                Status = $"Selected voice pack: <{SelectedVoice.DisplayName}>";
            }
        }

        /// <summary>
        /// Persists the selected voice package name to Config.ini when it
        /// differs from the stored one.
        /// </summary>
        private void SaveSelectedVoiceName()
        {
            // Guard: SelectedVoice can be null (no voices installed, or the
            // binding cleared it). The original dereferenced the generated
            // backing field `selectedVoice` without a null check.
            if (SelectedVoice is null)
            {
                return;
            }

            if (SelectedVoice.Name != _voiceName)
            {
                Ini.IniWriteValue("VoiceSetting", "VOICE_NAME", SelectedVoice.Name);
                _voiceName = SelectedVoice.Name;
                Status = $"Change voice to: <{SelectedVoice.Name}>, successfully saved!";
            }
        }

        /// <summary>
        /// Persists the voice selection to Config.ini whenever the
        /// <see cref="SelectedVoice"/> property changes.
        /// </summary>
        /// <param name="args">Property-change details from the MVVM toolkit.</param>
        protected override void OnPropertyChanged(PropertyChangedEventArgs args)
        {
            base.OnPropertyChanged(args);

            if (args.PropertyName == nameof(SelectedVoice))
            {
                // NOTE(review): the 500 ms delay presumably debounces rapid
                // selection changes — confirm. ContinueWith runs the save on a
                // thread-pool thread (not the UI thread) and any exception it
                // throws goes unobserved; verify this is acceptable.
                Task.Delay(500).ContinueWith(t => SaveSelectedVoiceName());
                // Alternative: save synchronously on change.
                // SaveSelectedVoiceName();
            }
        }

        /// <summary>
        /// (Re)builds the wake-word command recognizer and the zh-CN dictation
        /// recognizer, then starts listening for a wake word.
        /// </summary>
        private void InitializeRecognizer()
        {
            // Stop any running command recognizer before rebuilding it.
            StopRec(_recognizerCmd);
            Ini.IniWriteValue("VoiceSetting", "WAKE_WORDS", WakeWords);
            _recognizerCmd = new SpeechRecognitionEngine();
            _recognizerText = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("zh-CN"));

            // Build the wake-word grammar. Fixes in this rewrite:
            //  - the original split WakeWords before its null check (NRE on null);
            //  - it required more than one wake word (Count > 1), silently
            //    discarding a single configured wake word;
            //  - it popped a debug MessageBox during initialization;
            //  - empty segments from stray '|' would have thrown in Choices.Add.
            Choices commands = new Choices();
            string[] wakeWordList = (WakeWords ?? string.Empty)
                .Split('|', StringSplitOptions.RemoveEmptyEntries);
            if (wakeWordList.Length > 0)
            {
                foreach (string command in wakeWordList)
                {
                    commands.Add(command);
                }
            }
            else
            {
                commands.Add("你好小娜"); // fallback default wake word
            }
            GrammarBuilder grammarBuilder = new GrammarBuilder(commands);
            Grammar grammar = new Grammar(grammarBuilder);

            // Free-dictation grammar for speech-to-text.
            GrammarBuilder grammarBuilder_text = new GrammarBuilder();
            grammarBuilder_text.AppendDictation();
            Grammar grammar_text = new Grammar(grammarBuilder_text);

            // Load grammars and listen on the default microphone.
            _recognizerCmd.LoadGrammar(grammar);
            _recognizerText.LoadGrammar(grammar_text);
            _recognizerCmd.SetInputToDefaultAudioDevice();
            _recognizerText.SetInputToDefaultAudioDevice();

            // Recognition callbacks.
            _recognizerCmd.SpeechRecognized += Recognizer_SpeechRecognized_Cmd;
            _recognizerText.SpeechRecognized += Recognizer_SpeechRecognized_Text;

            // Start waiting for a wake word.
            StartRec(_recognizerCmd);
        }

        /// <summary>
        /// Starts continuous asynchronous recognition on the given engine.
        /// </summary>
        /// <param name="recognizer">Engine to start; ignored when null.</param>
        private void StartRec(SpeechRecognitionEngine recognizer)
        {
            // Guard against a not-yet-initialized engine, symmetrical with
            // StopRec (the recognizer fields are nullable).
            if (recognizer is null)
            {
                return;
            }
            // Multiple mode keeps recognizing until explicitly stopped.
            recognizer.RecognizeAsync(RecognizeMode.Multiple);
        }

        /// <summary>
        /// Stops asynchronous recognition on the given engine.
        /// </summary>
        /// <param name="recognizer">Engine to stop; a null engine means there
        /// is nothing to stop.</param>
        private void StopRec(SpeechRecognitionEngine recognizer)
        {
            if (recognizer is null)
            {
                return;
            }
            recognizer.RecognizeAsyncStop();
        }

        /// <summary>
        /// Handles a wake-word hit: verifies the speaker's voiceprint, greets
        /// them, and switches to dictation mode.
        /// </summary>
        /// <param name="sender">The command recognizer.</param>
        /// <param name="e">Recognition result, including the captured audio.</param>
        private async void Recognizer_SpeechRecognized_Cmd(object sender, SpeechRecognizedEventArgs e)
        {
            // The command grammar contains only the configured wake words, so
            // any recognition here IS a wake-up. The original compared the
            // result against the hard-coded default "你好小娜", which silently
            // broke every custom wake word loaded in InitializeRecognizer.

            // Pause wake-word listening while we verify the speaker.
            StopRec(_recognizerCmd);

            // Save the captured audio to a temp wav for the voiceprint service.
            RecognizedAudio audioStream = e.Result.Audio;
            AudioHelper audioHelper = new AudioHelper();
            string audioFileName = $"{Guid.NewGuid()}.wav";
            string filePath = audioHelper.SaveRecognizedAudioToFile(audioStream, audioFileName);

            // Identify the speaker by voiceprint.
            bool voiceRecognitionResult = await CallVoiceprintRecognitionServiceAsync(filePath);

            // Clean up the temporary audio file.
            if (File.Exists(filePath))
            {
                File.Delete(filePath);
            }

            if (voiceRecognitionResult)
            {
                // Acknowledge the wake-up, then wait for the greeting to finish.
                _speechQueue.Enqueue($"在");
                ProcessSpeechQueue();
                while (_isSpeaking)
                {
                    await Task.Delay(100);
                }

                // Clear any previous result and start dictation.
                RecognizedText = string.Empty;
                StartRecText();
            }
            else
            {
                // Identification failed: go back to wake-word listening.
                StartRec(_recognizerCmd);
            }
        }

        /// <summary>
        /// Runs one listening session: starts the dictation recognizer, stops
        /// it after ~5 seconds with no new speech, then either sends the
        /// recognized text to the AI or re-arms the wake-word recognizer.
        /// </summary>
        private async void StartRecText()
        {
            _isListening = true;
            _stopwatch = Stopwatch.StartNew();
            // Recreate the silence timer, disposing any previous instance.
            _timer?.Dispose();
            // Callback checks whether 5 s elapsed without new input.
            // NOTE(review): the async lambda inside Timer is fire-and-forget;
            // exceptions thrown in it are unobservable — confirm acceptable.
            _timer = new Timer(async (object? state) =>
            {
                Status = _stopwatch.Elapsed.TotalSeconds.ToString("0.00");
                if (_isListening && _stopwatch.Elapsed.TotalSeconds >= 5)
                {
                    Status = "停止监听";
                    // Stop listening and tear down the timer/stopwatch.
                    _isListening = false;
                    StopRec(_recognizerText);
                    _timer?.Dispose();
                    _stopwatch.Stop();

                    // Let the transcription pipeline drain before deciding
                    // what to do with the recognized text.
                    while (_isTranscribing || _transcriptionQueue.Count > 0)
                    {
                        await Task.Delay(100);
                    }

                    // Non-empty text goes to the AI endpoint...
                    if (!string.IsNullOrEmpty(RecognizedText))
                    {
                        await SendRecognizedTextToApiAsync(RecognizedText);
                    }
                    else
                    {
                        // ...otherwise return to wake-word listening.
                        StartRec(_recognizerCmd);
                    }
                }
            }, null, Timeout.Infinite, Timeout.Infinite);

            StartRec(_recognizerText);
            Status = "聆听中...";

            // Poll once per second: each Change(0, Infinite) fires the callback
            // exactly once immediately (the timer never self-repeats).
            while (_isListening)
            {
                await Task.Delay(1000); // check once per second
                _timer.Change(0, Timeout.Infinite);
            }
        }

        /// <summary>
        /// Handles a dictation result: saves the captured audio to a temp wav,
        /// queues it for transcription, and resets the silence stopwatch.
        /// </summary>
        /// <param name="sender">The dictation recognizer.</param>
        /// <param name="e">Recognition result, including the captured audio.</param>
        private void Recognizer_SpeechRecognized_Text(object sender, SpeechRecognizedEventArgs e)
        {
            // Only act on sufficiently confident results while actively listening.
            if (!(_isListening && e.Result.Confidence >= 0.7))
            {
                return;
            }

            // Persist the recognized audio to a uniquely named temp wav file.
            RecognizedAudio capturedAudio = e.Result.Audio;
            var helper = new AudioHelper();
            string tempFileName = $"{Guid.NewGuid()}.wav";
            string savedPath = helper.SaveRecognizedAudioToFile(capturedAudio, tempFileName);

            // Hand the file to the transcription queue.
            OnTranscriptionReceived(savedPath);

            // New speech arrived: restart the silence countdown.
            _stopwatch.Restart();
        }


        /// <summary>
        /// Uploads a wav file to the Flask /identify endpoint and stores the
        /// identified user's id/name on success.
        /// </summary>
        /// <param name="audioFilePath">Path of the audio file to identify.</param>
        /// <returns>True when the service returned both an id and a name.</returns>
        private async Task<bool> CallVoiceprintRecognitionServiceAsync(string audioFilePath)
        {
            try
            {
                Status = "Identify users...";

                // Multipart upload; disposing the form disposes the file stream.
                using var form = new MultipartFormDataContent
                {
                    { new StreamContent(File.OpenRead(audioFilePath)), "audio", "recognized_audio.wav" }
                };

                var httpResponse = await _httpClient.PostAsync($"{_flaskUrl}/identify", form);
                httpResponse.EnsureSuccessStatusCode();

                var identity = await httpResponse.Content.ReadFromJsonAsync<IdentifyResponse>();
                if (identity is { Name: not null, Id: not null })
                {
                    _userId = identity.Id;
                    _userName = identity.Name;
                    Status = identity.Name;
                    return true;
                }
                return false;
            }
            catch (System.Exception ex)
            {
                // Report the failure in the status line instead of crashing.
                Status = $"Identification failed: {ex.Message}";
                return false;
            }
        }

        /// <summary>
        /// Uploads a wav file to the Flask /transcribe endpoint and appends the
        /// returned transcription to <see cref="RecognizedText"/>.
        /// </summary>
        /// <param name="audioFilePath">Path of the audio file to transcribe.</param>
        /// <returns>True when a transcription was received.</returns>
        private async Task<bool> TranscribeAudioAsync(string audioFilePath)
        {
            try
            {
                Status = "Generating text...";

                // Upload file name fix: the original built "Temp/<guid>.wav" and
                // then interpolated it into "{name}.wav", sending the malformed
                // multipart filename "Temp/<guid>.wav.wav". Use a clean name.
                string audioFileName = $"{Guid.NewGuid()}.wav";
                using var content = new MultipartFormDataContent
                {
                    { new StreamContent(File.OpenRead(audioFilePath)), "audio", audioFileName }
                };

                var response = await _httpClient.PostAsync($"{_flaskUrl}/transcribe", content);
                response.EnsureSuccessStatusCode();

                var result = await response.Content.ReadFromJsonAsync<TranscribeRequest>();
                if (result?.Transcription != null)
                {
                    // Append: one utterance may arrive in several segments.
                    RecognizedText += result.Transcription;
                    return true;
                }
                return false;
            }
            catch (System.Exception ex)
            {
                Status = $"Text generation failed: {ex.Message}";
                return false;
            }
        }

        // Interactive button commands.

        /// <summary>Speaks the contents of the test text box.</summary>
        [RelayCommand]
        public void TestTTS()
        {
            // Guard: the original passed a possibly-null TestText straight to
            // SpeakAsync, where the synthesizer threw (and swallowed) an
            // ArgumentNullException. Ignore empty input instead.
            if (!string.IsNullOrEmpty(TestText))
            {
                SpeakAsync(TestText);
            }
        }

        /// <summary>
        /// Applies the edited wake words and confirms the change to the user.
        /// </summary>
        [RelayCommand]
        private async Task SaveWakeWords()
        {
            // Rebuild the recognizers so the new wake words take effect
            // immediately (this also persists them to Config.ini).
            InitializeRecognizer();

            var confirmation = new Wpf.Ui.Controls.MessageBox
            {
                Title = "提示",
                Content = "设置成功！",
                CloseButtonText = "知道了"
            };
            _ = await confirmation.ShowDialogAsync();
        }

        /// <summary>
        /// Synthesizes the given text with the built-in Windows TTS engine
        /// using the currently selected voice.
        /// </summary>
        /// <param name="text">Text to speak; empty/null input is skipped.</param>
        private void SpeakAsync(string text = "测试")
        {
            if (string.IsNullOrEmpty(text))
            {
                // Nothing will be spoken, so SpeakCompleted will never fire:
                // release the lock and keep the queue moving ourselves.
                _isSpeaking = false;
                ProcessSpeechQueue();
                return;
            }

            try
            {
                _synthesizer.SetOutputToDefaultAudioDevice();

                // Apply the user-selected voice. Moved inside the try block:
                // SelectVoice throws for voices that disappeared from the OS.
                if (SelectedVoice != null)
                {
                    _synthesizer.SelectVoice(SelectedVoice.Name);
                }

                _synthesizer.Volume = 100; // volume (0-100)
                _synthesizer.Rate = 0;     // rate (-10 to 10)

                _synthesizer.SpeakAsync(text);
                Debug.WriteLine($"SpeakAsync called with text: {text}");
            }
            catch (Exception ex)
            {
                Debug.WriteLine($"Exception in SpeakAsync: {ex.Message}");
                _isSpeaking = false; // release the lock on failure
                // Bug fix: the original reset the flag but never re-pumped the
                // queue, stalling every utterance queued after a failure.
                ProcessSpeechQueue();
            }
        }

        /// <summary>
        /// Sends the recognized text to the OpenAI-compatible chat API, logs
        /// both turns to the database, speaks the reply, then resumes listening.
        /// </summary>
        /// <param name="content">User text to send.</param>
        private async Task SendRecognizedTextToApiAsync(string content)
        {
            OpenAIService openAIService = new OpenAIService(new OpenAIOptions()
            {
                ApiKey = _apiKey,
                BaseDomain = _apiUrl
            });

            // First turn of a dialog: create an id and seed the system prompt.
            // (The original duplicated the user-message add in both branches.)
            if (string.IsNullOrEmpty(_dialogId))
            {
                _dialogId = Guid.NewGuid().ToString();
                _aiRequestMessages.Add(new ChatMessage("system", _apiPrompt));
            }
            _aiRequestMessages.Add(new ChatMessage("user", content));

            // Log the user turn. Use the parameter rather than RecognizedText
            // so the logged text always matches what was actually sent.
            _role = "user";
            _content = content;
            AddDialogContent();

            // Stream the reply into AiResponse and queue finished sentences
            // for TTS via the callbacks.
            AiResponse = string.Empty;
            string aiResponseContent = await _aiService.GetChatCompletionAsync(openAIService, _apiModel, _aiRequestMessages, OnMessageReceived, OnCompletionReceived);
            _aiRequestMessages.Add(new ChatMessage("assistant", aiResponseContent));

            // Log the assistant turn.
            _role = "assistant";
            _content = AiResponse;
            AddDialogContent();

            // Wait until all queued speech has been spoken before listening again.
            while (_isSpeaking || _speechQueue.Count > 0)
            {
                await Task.Delay(100);
            }
            RecognizedText = string.Empty;
            Status = "结束本次";
            StartRecText();
        }

        /// <summary>Streaming callback: mirrors the latest partial AI reply into the UI.</summary>
        private void OnMessageReceived(string message) => AiResponse = message;

        /// <summary>
        /// Completion callback: queues the finished reply segment for TTS and
        /// kicks the speech queue.
        /// </summary>
        private void OnCompletionReceived(string completion)
        {
            _speechQueue.Enqueue(completion);
            ProcessSpeechQueue();
        }

        /// <summary>
        /// Starts the next queued utterance unless one is already playing.
        /// The lock is released again by Synthesizer_SpeakCompleted.
        /// </summary>
        private void ProcessSpeechQueue()
        {
            // Re-entrancy guard: one utterance at a time.
            if (_isSpeaking)
            {
                return;
            }
            if (_speechQueue.Count == 0)
            {
                return;
            }

            _isSpeaking = true;
            string nextUtterance = _speechQueue.Dequeue();
            SpeakAsync(nextUtterance);
        }

        /// <summary>
        /// Queues a saved audio file for transcription and kicks the queue.
        /// </summary>
        /// <param name="audiofilePath">Path of the wav file to transcribe.</param>
        private void OnTranscriptionReceived(string audiofilePath)
        {
            _transcriptionQueue.Enqueue(audiofilePath);
            ProcessTranscriptionQueue();
        }

        /// <summary>
        /// Transcribes queued audio files one at a time on a background task.
        /// </summary>
        private void ProcessTranscriptionQueue()
        {
            // Re-entrancy guard: one transcription at a time.
            if (_isTranscribing)
            {
                return;
            }
            if (_transcriptionQueue.Count == 0)
            {
                return;
            }

            _isTranscribing = true;
            string audioFilePath = _transcriptionQueue.Dequeue();
            Task.Run(async () =>
            {
                try
                {
                    // TranscribeAudioAsync reports failures via Status; the
                    // result does not affect queue progress.
                    await TranscribeAudioAsync(audioFilePath);
                }
                finally
                {
                    // Bug fix: the original only reset the flag on success, so a
                    // failed transcription left _isTranscribing stuck at true and
                    // deadlocked StartRecText's drain loop. Also drain the next
                    // queued item instead of waiting for a new recognition event.
                    _isTranscribing = false;
                    ProcessTranscriptionQueue();
                }
            });
        }
    }

    // VoiceItem: metadata for one installed TTS voice package.
    public class VoiceItem
    {
        public string? Name { get; set; }        // engine voice name, e.g. "Microsoft Zira Desktop"
        public string? Culture { get; set; }     // culture string from VoiceInfo.Culture, e.g. "en-US"
        public string? Gender { get; set; }      // VoiceInfo.Gender as text
        public string? DisplayName { get; set; } // name shown in the voice drop-down
    }

    // Request payload carrying user text plus identity.
    // NOTE(review): not referenced anywhere in this file — verify other callers
    // before removing.
    public class AiRequest
    {
        public string? Text { get; set; }
        public string? UserName { get; set; }
        public string? UserId { get; set; }
    }

    // Response body of the Flask /transcribe endpoint.
    // NOTE(review): System.Text.Json property matching is case-sensitive by
    // default — confirm the server returns "Transcription" with this casing.
    public class TranscribeRequest
    {
        public string? Transcription { get; set; }
    }
}
