using System;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Networking;
using Newtonsoft.Json;
using UnityEditor.PackageManager.Requests;
using Newtonsoft.Json.Linq;

/// <summary>
/// Text-to-speech backend that talks to a GPT-SoVITS HTTP endpoint.
/// A reference audio clip plus its transcript are sent as the voice prompt;
/// the server replies with a path to the synthesized WAV, which is then
/// downloaded and handed to the caller as an <see cref="AudioClip"/>.
/// NOTE(review): relies on base-class members <c>stopwatch</c> and <c>m_PostURL</c>
/// declared in <c>TTS</c> — confirm they are initialized before Speak is called.
/// </summary>
public class GPTSoVITSTextToSpeech : TTS
{
    #region 配置参数
    [Header("参考音频路径（用于 GPT-SoVITS 项目）")]
    [SerializeField] private AudioClip m_ReferenceClip = null; // Reference clip used as the GPT-SoVITS voice prompt

    [Header("参考音频的文本")]
    [SerializeField] private string m_ReferenceText = ""; // Transcript of the reference clip

    [Header("参考音频的语言")]
    [SerializeField] private Language m_ReferenceTextLan = Language.中文; // Language of the reference transcript

    [Header("目标语言")]
    [SerializeField] private Language m_TargetTextLan = Language.中文; // Language of the text to synthesize

    private string m_AudioBase64String = ""; // Base64 encoding of the reference clip, computed once in Awake
    [SerializeField] private string m_SplitType = "字符"; // Split strategy for the target text (server-side option)
    [SerializeField] private int m_Top_k = 5;
    [SerializeField] private float m_Top_p = 1;
    [SerializeField] private float m_Temperature = 1;
    [SerializeField] private bool m_TextReferenceMode = false; // Whether to use text-only reference mode

    // Upper bound on re-requests when the server answers 200 but with an empty
    // audio path. The original code recursed without limit here, which could
    // loop forever against a misbehaving server.
    private const int k_MaxEmptyPathRetries = 3;

    #endregion

    private void Awake()
    {
        AudioTurnToBase64(); // Encode the reference clip once up front
    }

    /// <summary>
    /// Synthesizes speech for the given text.
    /// </summary>
    /// <param name="_msg">Text to synthesize.</param>
    /// <param name="_callback">Invoked with the synthesized clip and the original text on success.</param>
    public override void Speak(string _msg, Action<AudioClip, string> _callback)
    {
        StartCoroutine(GetVoice(_msg, _callback));
    }

    /// <summary>
    /// Posts the synthesis request and, on success, starts the audio download.
    /// </summary>
    /// <param name="_msg">Text to synthesize.</param>
    /// <param name="_callback">Receives the downloaded clip.</param>
    /// <param name="_retryCount">Internal: number of empty-path retries already performed.</param>
    /// <returns>Coroutine.</returns>
    private IEnumerator GetVoice(string _msg, Action<AudioClip, string> _callback, int _retryCount = 0)
    {
        stopwatch.Restart(); // Time the POST round-trip

        string _postJson = GetPostJson(_msg);
        if (string.IsNullOrEmpty(_postJson))
        {
            // Missing reference clip/text: GetPostJson already logged the error.
            // Previously the null fell through into Encoding.UTF8.GetBytes and threw.
            yield break;
        }

        using (UnityWebRequest request = new UnityWebRequest(m_PostURL, "POST"))
        {
            byte[] data = System.Text.Encoding.UTF8.GetBytes(_postJson);
            request.uploadHandler = new UploadHandlerRaw(data);
            request.downloadHandler = new DownloadHandlerBuffer();
            request.SetRequestHeader("Content-Type", "application/json");

            yield return request.SendWebRequest();

            // Check the transport result as well as the HTTP status, consistent
            // with GetAudioFromFile below.
            if (request.result == UnityWebRequest.Result.Success && request.responseCode == 200)
            {
                string _text = request.downloadHandler.text;
                Response _response = JsonUtility.FromJson<Response>(_text);
                // Guard against a malformed response with an empty data array.
                string _wavPath = (_response != null && _response.data.Count > 0)
                    ? _response.data[0].name
                    : "";

                if (string.IsNullOrEmpty(_wavPath))
                {
                    if (_retryCount < k_MaxEmptyPathRetries)
                    {
                        // Server produced no file yet — retry a bounded number of times.
                        StartCoroutine(GetVoice(_msg, _callback, _retryCount + 1));
                    }
                    else
                    {
                        Debug.LogError("语音合成失败: 服务器未返回音频路径");
                    }
                }
                else
                {
                    StartCoroutine(GetAudioFromFile(_wavPath, _msg, _callback));
                }
            }
            else
            {
                Debug.LogError("语音合成失败: " + request.error);
            }
        }

        stopwatch.Stop();
        // NOTE: this only measures the POST request; the file download runs in
        // its own coroutine and is not included.
        Debug.Log("GPT-SoVITS语音合成时间:" + stopwatch.Elapsed.TotalSeconds);
    }

    /// <summary>
    /// Builds the JSON body for the synthesis POST request.
    /// The server expects a positional "data" array (Gradio-style API).
    /// </summary>
    /// <param name="_msg">Text to synthesize.</param>
    /// <returns>The JSON string, or null when reference audio/text is missing.</returns>
    private string GetPostJson(string _msg)
    {
        if (m_ReferenceText == "" || m_ReferenceClip == null)
        {
            Debug.LogError("GPT-SoVITS缺少参考音频或参考文本");
            return null;
        }

        // Positional argument list; order must match the server's endpoint signature.
        var jsonData = new
        {
            data = new List<object>
            {
                new { name = "audio.wav", data = "data:audio/wav;base64," + m_AudioBase64String },
                m_ReferenceText,
                m_ReferenceTextLan.ToString(),
                _msg,
                m_TargetTextLan.ToString(),
                m_SplitType,
                m_Top_k,
                m_Top_p,
                m_Temperature,
                m_TextReferenceMode
            }
        };

        string jsonString = JsonConvert.SerializeObject(jsonData, Formatting.Indented);
        return jsonString;
    }

    /// <summary>
    /// Encodes the reference clip as a Base64 WAV string (cached in m_AudioBase64String).
    /// </summary>
    private void AudioTurnToBase64()
    {
        if (m_ReferenceClip == null)
        {
            Debug.LogError("GPT-SoVITS缺少参考音频");
            return;
        }
        byte[] audioData = WavUtility.FromAudioClip(m_ReferenceClip); // Project helper: AudioClip -> WAV bytes
        m_AudioBase64String = Convert.ToBase64String(audioData);
    }

    /// <summary>
    /// Downloads the synthesized WAV from the server-side file path and
    /// delivers it to the callback.
    /// </summary>
    /// <param name="_path">Absolute path of the WAV file on the server machine.
    /// NOTE(review): "file://" access assumes the server runs on this machine — confirm.</param>
    /// <param name="_msg">The original input text, passed through to the callback.</param>
    /// <param name="_callback">Receives the clip and the text.</param>
    /// <returns>Coroutine.</returns>
    private IEnumerator GetAudioFromFile(string _path, string _msg, Action<AudioClip, string> _callback)
    {
        string filePath = "file://" + _path;
        using (UnityWebRequest request = UnityWebRequestMultimedia.GetAudioClip(filePath, AudioType.WAV))
        {
            yield return request.SendWebRequest();

            if (request.result == UnityWebRequest.Result.Success)
            {
                AudioClip audioClip = DownloadHandlerAudioClip.GetContent(request);
                _callback(audioClip, _msg);
            }
            else
            {
                Debug.LogError("获取音频失败: " + request.error);
            }
        }
    }

    #region 数据结构定义

    /// <summary>Top-level JSON response of the synthesis endpoint.</summary>
    [Serializable]
    public class Response
    {
        public List<AudioBack> data = new List<AudioBack>();
        public bool is_generating = true;
        public float duration;
        public float average_duration;
    }

    /// <summary>One entry of the response "data" array; name holds the WAV path.</summary>
    [Serializable]
    public class AudioBack
    {
        public string name = string.Empty;
        public string data = string.Empty;
        public bool is_file = true;
    }

    // Enum member names are serialized via ToString() and sent to the server
    // verbatim — do not rename or translate them.
    public enum Language
    {
        中文,
        英语,
        日语,
        韩语,
        法语,
        西班牙语
    }

    #endregion
}