import Foundation
import Hub
import MLX
import MLXNN
import MLXRandom
import Vocos

// MARK: - ODE solvers

/// Integrates `fun` with the forward-Euler method over the time grid `t`.
///
/// - Parameters:
///   - fun: Right-hand side f(t, y) of the ODE dy/dt = f(t, y).
///   - y0: Initial state at `t[0]`.
///   - t: 1-D array of (not necessarily uniform) time points.
/// - Returns: The states stacked along axis 0, one per entry of `t`.
func odeint_euler(fun: (Float, MLXArray) -> MLXArray, y0: MLXArray, t: MLXArray) -> MLXArray {
    var trajectory = [y0]
    var state = y0
    let stepCount = t.shape[0] - 1

    for step in 0..<stepCount {
        let tNow = t[step].item(Float.self)
        let stepSize = t[step + 1].item(Float.self) - tNow

        // Single slope sample at the interval start.
        let slope = fun(tNow, state)
        state = state + stepSize * slope
        trajectory.append(state)
    }

    return MLX.stacked(trajectory, axis: 0)
}

/// Integrates `fun` with the explicit midpoint (RK2) method over the grid `t`.
///
/// - Parameters:
///   - fun: Right-hand side f(t, y) of the ODE dy/dt = f(t, y).
///   - y0: Initial state at `t[0]`.
///   - t: 1-D array of (not necessarily uniform) time points.
/// - Returns: The states stacked along axis 0, one per entry of `t`.
func odeint_midpoint(fun: (Float, MLXArray) -> MLXArray, y0: MLXArray, t: MLXArray) -> MLXArray {
    var trajectory = [y0]
    var state = y0
    let stepCount = t.shape[0] - 1

    for step in 0..<stepCount {
        let tNow = t[step].item(Float.self)
        let stepSize = t[step + 1].item(Float.self) - tNow

        // Slope at the interval start, then at the estimated midpoint.
        let slopeStart = fun(tNow, state)
        let midState = state + 0.5 * stepSize * slopeStart
        let slopeMid = fun(tNow + 0.5 * stepSize, midState)

        // Advance using the midpoint slope only.
        state = state + stepSize * slopeMid
        trajectory.append(state)
    }

    return MLX.stacked(trajectory, axis: 0)
}

/// Integrates `fun` with the classic fourth-order Runge–Kutta method over the grid `t`.
///
/// - Parameters:
///   - fun: Right-hand side f(t, y) of the ODE dy/dt = f(t, y).
///   - y0: Initial state at `t[0]`.
///   - t: 1-D array of (not necessarily uniform) time points.
/// - Returns: The states stacked along axis 0, one per entry of `t`.
func odeint_rk4(fun: (Float, MLXArray) -> MLXArray, y0: MLXArray, t: MLXArray) -> MLXArray {
    var ys = [y0]
    var yCurrent = y0

    for i in 0..<(t.shape[0] - 1) {
        let tCurrent = t[i].item(Float.self)
        let dt = t[i + 1].item(Float.self) - tCurrent

        // Four slope samples: interval start, two midpoint estimates, endpoint.
        let k1 = fun(tCurrent, yCurrent)
        let k2 = fun(tCurrent + 0.5 * dt, yCurrent + 0.5 * dt * k1)
        let k3 = fun(tCurrent + 0.5 * dt, yCurrent + 0.5 * dt * k2)
        let k4 = fun(tCurrent + dt, yCurrent + dt * k3)

        // Weighted RK4 combination.
        let yNext = yCurrent + (dt / 6) * (k1 + 2 * k2 + 2 * k3 + k4)

        ys.append(yNext)
        yCurrent = yNext
    }

    // Explicit axis for consistency with odeint_euler / odeint_midpoint
    // (MLX.stacked defaults to axis 0, so behavior is unchanged).
    return MLX.stacked(ys, axis: 0)
}

// MARK: - F5TTS main class (supports English and Chinese)

/// F5-TTS text-to-speech model: a mel-spectrogram flow-matching model with a
/// DiT backbone, sampled by integrating a probability-flow ODE with a
/// fixed-step solver. Text is encoded either per character (English) or per
/// whitespace-separated pinyin unit (Chinese).
public class F5TTS: Module {
    /// Fixed-step ODE solver used during sampling.
    public enum ODEMethod: String {
        case euler
        case midpoint
        case rk4
    }
    
    enum F5TTSError: Error {
        case unableToLoadModel
        case unableToLoadReferenceAudio
        case unableToDetermineDuration
        case invalidTextFormat
    }

    public let melSpec: MelSpec
    public let transformer: DiT

    let dim: Int
    let numChannels: Int
    let vocabMap: [String: Int]  // unified vocabulary (single English characters and Chinese pinyin units)
    let _durationPredictor: DurationPredictor?

    /// - Parameters:
    ///   - transformer: DiT backbone that predicts the mel flow field.
    ///   - melSpec: Mel-spectrogram front end used on reference audio.
    ///   - vocabMap: Token-to-index map; keys may be multi-character units.
    ///   - durationPredictor: Optional model used when no explicit duration is supplied.
    init(
        transformer: DiT,
        melSpec: MelSpec,
        vocabMap: [String: Int],  // renamed to vocabMap (supports multi-character units)
        durationPredictor: DurationPredictor? = nil
    ) {
        self.melSpec = melSpec
        self.numChannels = self.melSpec.nMels
        self.transformer = transformer
        self.dim = transformer.dim
        self.vocabMap = vocabMap
        self._durationPredictor = durationPredictor

        super.init()
    }

    // MARK: - Bilingual text-encoding logic

    /// Heuristically detects pinyin-style text (whitespace-separated ASCII
    /// alphanumeric units such as "ni3 hao3").
    /// NOTE(review): plain English like "hello world" also matches this
    /// pattern and is therefore split on whitespace rather than per
    /// character — confirm this is intended.
    private func isChinesePinyin(_ text: String) -> Bool {
        // Pinyin signature: space-separated units, optionally with tone digits.
        let pinyinPattern = "^[a-zA-Z0-9]+( [a-zA-Z0-9]+)*$"
        return text.range(of: pinyinPattern, options: .regularExpression) != nil
    }

    /// Splits text into encoding units (English per character, Chinese per pinyin unit).
    private func splitTextIntoUnits(_ text: String) -> [String] {
        if isChinesePinyin(text) {
            // Chinese: split pinyin units on whitespace ("ni3 hao3" -> ["ni3", "hao3"])
            return text.components(separatedBy: .whitespaces).filter { !$0.isEmpty }
        } else {
            // English: split per character ("hello" -> ["h", "e", "l", "l", "o"])
            return text.map { String($0) }
        }
    }

    /// Converts a batch of texts to a padded array of vocabulary indices.
    /// - Parameter paddingValue: Fill value for positions past each text's length.
    private func textToIndices(_ texts: [String], paddingValue: Int = -1) -> MLXArray {
        let unitsList = texts.map { splitTextIntoUnits($0) }
        // Map units to indices (unknown units fall back to <unk>, then 0).
        let indexedUnits = unitsList.map { units in
            units.map { unit in
                self.vocabMap[unit] ?? self.vocabMap["<unk>"] ?? 0
            }
        }
        // Convert to MLX arrays and pad to a common length.
        let mlxArrays = indexedUnits.map { MLXArray($0) }
        return padSequence(mlxArrays, paddingValue: Float(paddingValue)).asType(.int32)
    }

    // MARK: - Sampling logic

    /// Samples mel frames (or a waveform, when `vocoder` is given) conditioned
    /// on reference audio and text by integrating the flow ODE from noise.
    ///
    /// - Parameters:
    ///   - cond: Reference audio; 2-D waveform input is converted to a mel
    ///     spectrogram, 3-D input is used as-is ([batch, seq_len, mel_dim]).
    ///   - text: One string per batch element (reference text + target text).
    ///   - duration: Target length in mel frames; predicted when nil.
    ///   - steps: Number of ODE time points.
    ///   - cfgStrength: Classifier-free-guidance scale; <= 1e-5 disables CFG.
    ///   - swayCoef: Optional sway-sampling warp applied to the time grid.
    ///   - vocoder: Optional mel-to-waveform decoder applied to the result.
    /// - Returns: (output, full ODE trajectory).
    /// - Throws: `F5TTSError.invalidTextFormat` on bad cond shape,
    ///   `.unableToDetermineDuration` when no duration is available.
    private func sample(
        cond: MLXArray,
        text: [String],
        duration: Int? = nil,
        lens: MLXArray? = nil,
        steps: Int = 8,
        method: ODEMethod = .rk4,
        cfgStrength: Double = 2.0,
        swayCoef: Double? = -1.0,
        seed: Int? = nil,
        maxDuration: Int = 4096,
        vocoder: ((MLXArray) -> MLXArray)? = nil,
        progressHandler: ((Double) -> Void)? = nil
    ) throws -> (MLXArray, MLXArray) {
        MLX.eval(self.parameters())

        var cond = cond

        // Convert raw reference audio to a mel spectrogram
        // (target shape: [batch, seq_len, mel_dim]).
        if cond.ndim == 2 {
            // Raw waveform arrives as [batch, samples]; flatten to [samples].
            cond = cond.reshaped([cond.shape[1]])  // reshape to [samples]
            cond = self.melSpec(x: cond)  // output shape: [1, seq_len, mel_dim]
            if cond.shape[0] == 1 && cond.shape.count == 3 {
                // NOTE(review): squeezing here yields a 2-D [seq_len, mel_dim]
                // array, which the ndim == 3 guard below would then reject —
                // verify whether this squeeze should instead be a no-op or the
                // guard should accept 2-D input.
                cond = cond.squeezed(axis: 0)
            }
        }

        // Validate the base shape of cond.
        guard cond.ndim == 3 else {
            throw F5TTSError.invalidTextFormat
        }
        let batch = cond.shape[0]
        let condSeqLen = cond.shape[1]
        let melDim = cond.shape[2]  // mel-spectrogram dimension (typically 100)

        // Initialize lengths from the reference-audio frame count.
        var lens = lens ?? MLX.full([batch], values: condSeqLen, type: Int32.self)

        // Bilingual text encoding.
        let inputText = textToIndices(text)  // shape: [batch, text_seq_len]
        let textLens = MLXArray(text.map { splitTextIntoUnits($0).count })
            .reshaped([batch, 1])  // shape: [batch, 1]
            .asType(DType.int32)
        // NOTE(review): `textLens` is [batch, 1] while `lens` is [batch];
        // MLX broadcasting would produce [batch, batch] for batch > 1 —
        // confirm intended shape (harmless only when batch == 1).
        lens = MLX.maximum(textLens, lens)  // shape: [batch, 1]

        // Build the conditioning mask (shape [batch, seq_len, 1]).
        var condMask = lensToMask(t: lens)  // shape: [batch, max_seq_len]
        // Add a trailing singleton axis so the mask broadcasts over mel_dim.
        condMask = condMask.expandedDimensions(axis: -1)  // shape: [batch, max_seq_len, 1]

        // Resolve the target duration (explicit value takes priority).
        var resolvedDuration: MLXArray? = duration.map { MLXArray($0) }

        if resolvedDuration == nil, let durationPredictor = self._durationPredictor {
            let estimatedDurationInSeconds = durationPredictor(cond, text: text).item(Float32.self)
            resolvedDuration = MLXArray(Int(Double(estimatedDurationInSeconds) * F5TTS.framesPerSecond))
        }

        guard let resolvedDuration else {
            throw F5TTSError.unableToDetermineDuration
        }

        print("生成音频长度：\(Double(resolvedDuration.item(Float32.self)) / F5TTS.framesPerSecond) 秒")

        // Clamp the duration and align the conditioning features to it.
        // (The parameter `maxDuration` is used in the clip, then shadowed by
        // the per-batch maximum below.)
        var duration = resolvedDuration
        duration = MLX.clip(MLX.maximum(lens + 1, duration), min: 0, max: maxDuration)
        let maxDuration = duration.max().item(Int.self)

        // Pad conditioning features along time to maxDuration.
        cond = MLX.padded(
            cond,
            widths: [
                .init((0, 0)),  // batch axis: no padding
                .init((0, maxDuration - condSeqLen)),  // time axis: pad to maxDuration
                .init((0, 0))   // mel axis: no padding
            ]
        )  // padded shape: [batch, maxDuration, mel_dim]

        // Pad the mask in lockstep (padding is false = unconditioned).
        condMask = MLX.padded(
            condMask,
            widths: [
                .init((0, 0)),
                .init((0, maxDuration - condMask.shape[1])),  // time axis padded to match
                .init((0, 0))
            ],
            value: MLXArray(false)
        )  // padded shape: [batch, maxDuration, 1]

        // Zero out conditioning outside the reference region.
        let stepCond = MLX.where(condMask, cond, MLX.zeros(like: cond))  // shape: [batch, maxDuration, mel_dim]

        // Per-sequence attention mask (only needed for batch > 1).
        let mask: MLXArray? = (batch > 1) ? lensToMask(t: duration) : nil

        // ODE right-hand side: one (optionally CFG-guided) transformer step.
        let fn: (Float, MLXArray) -> MLXArray = { t, x in
            // x shape: [batch, maxDuration, mel_dim]
            let pred = self.transformer(
                x: x,
                cond: stepCond,
                text: inputText,
                time: MLXArray(t),
                dropAudioCond: false,
                dropText: false,
                mask: mask
            )  // output shape: [batch, maxDuration, mel_dim]

            // CFG disabled: return the conditional prediction directly.
            guard cfgStrength > 1e-5 else {
                pred.eval()
                return pred
            }

            // Unconditional prediction (audio and text dropped) for CFG.
            let nullPred = self.transformer(
                x: x,
                cond: stepCond,
                text: inputText,
                time: MLXArray(t),
                dropAudioCond: true,
                dropText: true,
                mask: mask
            )  // output shape: [batch, maxDuration, mel_dim]

            progressHandler?(Double(t))

            // Classifier-free guidance: push prediction away from the null one.
            let output = pred + (pred - nullPred) * cfgStrength  // shape unchanged
            output.eval()
            return output
        }

        // Initialize the noise input (same shape as the target).
        var y0: [MLXArray] = []
        for dur in duration {
            // NOTE(review): reseeding inside the loop makes every batch
            // element draw identical noise — confirm this is intended.
            if let seed {
                MLXRandom.seed(UInt64(seed))
            }
            // noise shape: [dur, mel_dim]
            let noise = MLXRandom.normal([dur.item(Int.self), melDim])
            y0.append(noise)
        }
        let y0Padded = padSequence(y0, paddingValue: 0.0)  // shape: [batch, maxDuration, mel_dim]

        // Build the time grid on [0, 1].
        var t = MLXArray.linspace(Float32(0.0), Float32(1.0), count: steps)

        // Optional sway sampling: warp the grid toward early timesteps.
        if let coef = swayCoef {
            t = t + coef * (MLX.cos(MLXArray(.pi) / 2 * t) - 1 + t)
        }

        // Select the ODE solver.
        let odeintFn = switch method {
        case .euler: odeint_euler
        case .midpoint: odeint_midpoint
        case .rk4: odeint_rk4
        }

        // Integrate (trajectory shape: [steps, batch, maxDuration, mel_dim]).
        let trajectory = odeintFn(fn, y0Padded, t)
        let sampled = trajectory[-1]  // final sample: [batch, maxDuration, mel_dim]

        // Keep the reference region from cond, generated region from the sample.
        var out = MLX.where(condMask, cond, sampled)  // both shaped [batch, maxDuration, mel_dim]

        // Decode to a waveform when a vocoder is provided.
        if let vocoder {
            out = vocoder(out)  // mel -> audio waveform
        }
        out.eval()

        return (out, trajectory)
    }

    // MARK: - Public generation API (bilingual input)

    /// Generates speech for `text`, optionally cloning the voice of a
    /// reference recording. Downloads and runs the Vocos vocoder.
    ///
    /// - Parameters:
    ///   - text: English ("hello") or pinyin ("ni3 hao3") target text.
    ///   - referenceAudioURL: Voice reference; a bundled sample is used when nil.
    ///   - referenceAudioText: Transcript of the reference audio (also bilingual).
    ///   - duration: NOTE(review): currently unused — `sample` is called with
    ///     `duration: nil`. Confirm whether it should be forwarded.
    ///   - speed: NOTE(review): currently unused in this method.
    /// - Returns: Generated waveform with the reference portion trimmed off.
    public func generate(
        text: String,  // supports English "hello" or Chinese "ni3 hao3"
        referenceAudioURL: URL? = nil,
        referenceAudioText: String? = nil,  // reference transcript, also bilingual
        duration: TimeInterval? = nil,
        steps: Int = 8,
        method: ODEMethod = .rk4,
        cfg: Double = 2.0,
        sway: Double = -1.0,
        speed: Double = 1.0,
        seed: Int? = nil,
        progressHandler: ((Double) -> Void)? = nil
    ) async throws -> MLXArray {
        print("加载Vocos声码器...")
        let vocos = try await Vocos.fromPretrained(repoId: "lucasnewman/vocos-mel-24khz-mlx")

        // Load the reference audio and its transcript.
        var audio: MLXArray
        let referenceText: String

        if let referenceAudioURL {
            audio = try F5TTS.loadAudioArray(url: referenceAudioURL)
            referenceText = referenceAudioText ?? ""
        } else {
            let refAudioAndCaption = try F5TTS.referenceAudio()
            (audio, referenceText) = refAudioAndCaption
        }

        let refAudioDuration = Double(audio.shape[0]) / Double(F5TTS.sampleRate)
        print("参考音频长度：\(refAudioDuration) 秒")

        // Concatenate reference transcript and target text for conditioning.
        let processedText: String
        if referenceText.isEmpty {
            processedText = text
        } else {
            processedText = "\(referenceText) \(text)"
        }

        // Generate audio.
        let normalizedAudio = F5TTS.normalizeAudio(audio: audio)
        let (outputAudio, _) = try self.sample(
            cond: normalizedAudio.expandedDimensions(axis: 0),
            text: [processedText],
            duration: nil,
            steps: steps,
            method: method,
            cfgStrength: cfg,
            swayCoef: sway,
            seed: seed,
            vocoder: vocos.decode
        ) { progress in
            print("生成进度：\(progress)")
            progressHandler?(progress)
        }

        // Trim the reference portion. Assumes the vocoder output is
        // sample-aligned with the reference waveform length — TODO confirm.
        return outputAudio[audio.shape[0]...]
    }
}

// MARK: - Pretrained model loading (bilingual vocabulary)

public extension F5TTS {
    /// Downloads a model snapshot (*.safetensors, *.txt) from the Hub and
    /// loads it.
    /// - Parameter downloadProgress: Forwarded download-progress callback.
    static func fromPretrained(repoId: String, downloadProgress: ((Progress) -> Void)? = nil) async throws -> F5TTS {
        let modelDirectoryURL = try await Hub.snapshot(from: repoId, matching: ["*.safetensors", "*.txt"]) { progress in
            downloadProgress?(progress)
        }
        return try self.fromPretrained(modelDirectoryURL: modelDirectoryURL)
    }

    /// Loads model weights, mel filterbank, vocabulary, and (optionally) the
    /// duration predictor from a local snapshot directory.
    /// - Throws: `F5TTSError.unableToLoadModel` when a required resource is missing.
    static func fromPretrained(modelDirectoryURL: URL) throws -> F5TTS {
        let modelURL = modelDirectoryURL.appendingPathComponent("model.safetensors")
        let modelWeights = try loadArrays(url: modelURL)

        // Load the bundled mel filterbank.
        guard let filterbankURL = Bundle.module.url(forResource: "mel_filters", withExtension: "npy") else {
            throw F5TTSError.unableToLoadModel
        }
        let filterbank = try MLX.loadArray(url: filterbankURL)

        // Load the bilingual vocabulary (vocab.txt must contain both single
        // English characters and Chinese pinyin units).
        let vocabURL = modelDirectoryURL.appendingPathComponent("vocab.txt")
        guard let vocabString = try String(data: Data(contentsOf: vocabURL), encoding: .utf8) else {
            throw F5TTSError.unableToLoadModel
        }

        // NOTE(review): split(separator:) drops empty subsequences, so any
        // blank line in vocab.txt (a valid entry in some vocabularies) would
        // shift all subsequent indices — confirm against the training vocab.
        let vocabEntries = vocabString.split(separator: "\n").map { String($0) }

        // token -> line index. Dictionary(uniqueKeysWithValues:) traps on
        // duplicate tokens, so vocab.txt entries must be unique.
        let vocab = Dictionary(uniqueKeysWithValues: zip(vocabEntries, vocabEntries.indices))

        // Load the duration predictor (optional; generation works without it
        // when an explicit duration is supplied).
        var durationPredictor: DurationPredictor?
        let durationModelURL = modelDirectoryURL.appendingPathComponent("duration_v2.safetensors")
        do {
            let durationModelWeights = try loadArrays(url: durationModelURL)

            let durationTransformer = DurationTransformer(
                dim: 512,
                depth: 8,
                heads: 8,
                dimHead: 64,
                ffMult: 2,
                textNumEmbeds: vocab.count,  // sized to the bilingual vocabulary
                textDim: 512,
                convLayers: 2
            )
            let predictor = DurationPredictor(
                transformer: durationTransformer,
                melSpec: MelSpec(filterbank: filterbank),
                vocabMap: vocab  // bilingual vocabulary
            )
            try predictor.update(parameters: ModuleParameters.unflattened(durationModelWeights), verify: [])

            durationPredictor = predictor
        } catch {
            print("警告：未找到时长预测器模型：\(error)")
        }

        // Build the F5TTS model with the bilingual vocabulary.
        let dit = DiT(
            dim: 1024,
            depth: 22,
            heads: 16,
            ffMult: 2,
            textNumEmbeds: vocab.count,  // sized to the bilingual vocabulary
            textDim: 512,
            convLayers: 4
        )
        let f5tts = F5TTS(
            transformer: dit,
            melSpec: MelSpec(filterbank: filterbank),
            vocabMap: vocab,  // bilingual vocabulary
            durationPredictor: durationPredictor
        )
        try f5tts.update(parameters: ModuleParameters.unflattened(modelWeights), verify: [.all])

        return f5tts
    }
}

// MARK: - Utility methods

public extension F5TTS {
    /// Output sample rate in Hz.
    static var sampleRate: Int = 24000
    /// Mel-spectrogram hop length in samples.
    static var hopLength: Int = 256
    /// Mel frames per second of audio (sampleRate / hopLength).
    static var framesPerSecond: Double = .init(sampleRate) / Double(hopLength)

    /// Loads an audio file into a waveform array.
    static func loadAudioArray(url: URL) throws -> MLXArray {
        try AudioUtilities.loadAudioFile(url: url)
    }

    /// Returns the bundled English reference audio and its transcript.
    /// - Throws: `F5TTSError.unableToLoadReferenceAudio` when the resource is missing.
    static func referenceAudio() throws -> (MLXArray, String) {
        guard let url = Bundle.module.url(forResource: "test_en_1_ref_short", withExtension: "wav") else {
            throw F5TTSError.unableToLoadReferenceAudio
        }

        return try (
            self.loadAudioArray(url: url),
            "Some call me nature, others call me mother nature."
        )
    }

    /// Boosts quiet audio so its RMS reaches `targetRMS`; louder audio is
    /// returned unchanged.
    /// - Note: Silent input (RMS == 0) is returned as-is; the original code
    ///   divided by zero here, producing inf/NaN samples.
    static func normalizeAudio(audio: MLXArray, targetRMS: Double = 0.1) -> MLXArray {
        let rms = Double(audio.square().mean().sqrt().item(Float.self))
        if rms > 0, rms < targetRMS {
            return audio * targetRMS / rms
        }
        return audio
    }

    /// Estimates the generated duration by scaling the reference audio's
    /// frames-per-text-unit ratio to the target text length (divided by `speed`).
    static func estimatedDuration(refAudio: MLXArray, refText: String, text: String, speed: Double = 1.0) -> TimeInterval {
        let refDurationInFrames = refAudio.shape[0] / self.hopLength
        // Clamp to 1 so an empty reference transcript cannot divide by zero.
        let refTextLength = max(splitTextIntoUnits(refText).count, 1)  // bilingual unit count
        let genTextLength = splitTextIntoUnits(text).count

        let refAudioToTextRatio = Double(refDurationInFrames) / Double(refTextLength)
        let textLength = Double(genTextLength) / speed
        let estimatedDurationInFrames = Int(refAudioToTextRatio * textLength)

        let estimatedDuration = TimeInterval(estimatedDurationInFrames) / Self.framesPerSecond
        print("预估生成时长：\(estimatedDuration) 秒（\(estimatedDurationInFrames) 帧）")

        return estimatedDuration
    }
    
    /// Splits text into encoding units for external callers: pinyin-like text
    /// ("ni3 hao3") on whitespace, anything else character by character.
    static func splitTextIntoUnits(_ text: String) -> [String] {
        let pinyinPattern = "^[a-zA-Z0-9]+( [a-zA-Z0-9]+)*$"
        if text.range(of: pinyinPattern, options: .regularExpression) != nil {
            return text.components(separatedBy: .whitespaces).filter { !$0.isEmpty }
        } else {
            return text.map { String($0) }
        }
    }
}

// MARK: - MLX utilities

/// Builds a boolean mask from per-row lengths: mask[b, i] == true iff i < t[b].
/// - Parameters:
///   - t: Integer lengths, one per row.
///   - length: Mask width; defaults to the maximum length in `t`.
func lensToMask(t: MLXArray, length: Int? = nil) -> MLXArray {
    let width = length ?? t.max(keepDims: false).item(Int.self)
    // Compare a broadcast position row [1, width] against lengths [rows, 1].
    let positions = MLXArray(0..<width).expandedDimensions(axis: 0)
    let lengths = t.expandedDimensions(axis: 1)
    return MLX.less(positions, lengths)
}

/// Pads `t` so a sequence axis reaches `length`; returns `t` unchanged when it
/// is already long enough.
///
/// - Parameters:
///   - t: 1-, 2-, or 3-D array.
///   - length: Target length for the padded axis.
///   - value: Fill value (defaults to 0).
/// NOTE(review): the current length is read from the LAST axis
/// (`t.shape.last`), but for 3-D input the padding is applied to axis 1 (the
/// middle axis) — these only agree when axes 1 and 2 have equal sizes.
/// Confirm which axis is intended.
func padToLength(_ t: MLXArray, length: Int, value: Float? = nil) -> MLXArray {
    let ndim = t.ndim

    // Already long enough (or shape unavailable): return as-is.
    guard let seqLen = t.shape.last, length > seqLen else {
        return t[0..., .ellipsis]
    }

    let paddingValue = MLXArray(value ?? 0.0)

    let padded: MLXArray
    switch ndim {
    case 1:
        padded = MLX.padded(t, widths: [.init((0, length - seqLen))], value: paddingValue)
    case 2:
        padded = MLX.padded(t, widths: [.init((0, 0)), .init((0, length - seqLen))], value: paddingValue)
    case 3:
        // Pads axis 1 (time), not the last axis — see NOTE above.
        padded = MLX.padded(t, widths: [.init((0, 0)), .init((0, length - seqLen)), .init((0, 0))], value: paddingValue)
    default:
        fatalError("不支持的维度：\(ndim)")
    }

    return padded[0..., .ellipsis]
}

/// Stacks a list of arrays along a new leading axis and pads to a common length.
///
/// NOTE(review): `maxLen` is taken from each element's LAST axis, and
/// `MLX.stacked` requires all elements to share a shape already — so for
/// equal-shape inputs the `padToLength` call is effectively a no-op and this
/// reduces to a plain stack. Confirm whether ragged inputs were intended to
/// be supported (they would fail inside `stacked`).
func padSequence(_ t: [MLXArray], paddingValue: Float = 0) -> MLXArray {
    let maxLen = t.map { $0.shape.last ?? 0 }.max() ?? 0
    let t = MLX.stacked(t, axis: 0)
    return padToLength(t, length: maxLen, value: paddingValue)
}
