//
//  Transcriber.swift
//  SpeechAnalyzerDemo
//
//  Supports SpeechTranscriber on iOS 26 and later,
//  and falls back to SFSpeechRecognizer on older systems.
//

import Speech
import AVFAudio

/// Seconds of trailing silence treated as an end-of-utterance gap.
/// NOTE(review): not referenced in this chunk — confirm usage against callers.
let EndSpaceTime: Double = 2

/// Global processing counter.
/// NOTE(review): mutable global state is not concurrency-safe — consider actor confinement.
var processCount = 0

@MainActor
final class Transcriber {

    // MARK: - Unified error definition
    enum _Error: Error, LocalizedError {
        case notAvailable
        case localeNotSupported
        case audioConverterCreationFailed
        case failedToConvertBuffer(String?)
        case speechRecognizerUnavailable
        case missingLocaleAsset

        /// Human-readable description of the error.
        var message: String {
            switch self {
            case .notAvailable:
                return "Transcriber is not available on the given device."
            case .localeNotSupported:
                return "Locale selected is not supported by transcriber."
            case .audioConverterCreationFailed:
                return "Fail to create Audio Converter"
            case .failedToConvertBuffer(let detail):
                return "Failed to convert buffer to the destination format. \(detail ?? "")"
            case .speechRecognizerUnavailable:
                return "当前语言暂不支持语音识别，请检查网络或更换语言后重试。"
            case .missingLocaleAsset:
                return "所选语音资源尚未下载，请保持网络连接或前往“设置 > 通用 > 语言与地区”下载对应语音包。"
            }
        }

        // FIX: conform to `LocalizedError` so `error.localizedDescription`
        // surfaces `message` instead of a generic Foundation fallback.
        var errorDescription: String? { message }
    }

    // MARK: - Supported locales (modern and legacy APIs)

    /// All locales the active backend supports, each paired with a flag that
    /// indicates whether the locale's assets are installed locally
    /// (always `true` on the legacy `SFSpeechRecognizer` path).
    static var supportedLocales: [(Locale, Bool)] {
        get async {
#if compiler(>=5.9)
            if #available(iOS 26.0, *) {
                let supported = await SpeechTranscriber.supportedLocales
                let installed = await SpeechTranscriber.installedLocales
                return supported.map { ($0, installed.contains($0)) }
            }
#endif
            let locales = SFSpeechRecognizer.supportedLocales()
            return locales.map { ($0, true) }
        }
    }

    /// Transcription events produced by the active backend.
    let transcriptionEvents: AsyncThrowingStream<TranscriptionEvent, Error>
    private let implementation: any TranscriberImplementation

    /// Selects the best available backend for `locale`:
    /// `SpeechTranscriber` on iOS 26+, `SFSpeechRecognizer` otherwise.
    init(locale: Locale) async throws {
#if compiler(>=5.9)
        if #available(iOS 26.0, *) {
            let impl = try await ModernTranscriber(locale: locale)
            implementation = impl
            transcriptionEvents = impl.eventsStream
            return
        }
#endif
        let legacy = try LegacyTranscriber(locale: locale)
        implementation = legacy
        transcriptionEvents = legacy.eventsStream
    }

    /// Transcribes the audio file at `fileURL`.
    func transcribeFile(_ fileURL: URL) async throws {
        try await implementation.transcribeFile(fileURL)
    }

    /// Starts a live transcription session.
    func startRealTimeTranscription() async throws {
        try await implementation.startRealTimeTranscription()
    }

    /// Feeds one audio buffer into the live session.
    func streamAudioToTranscriber(_ buffer: AVAudioPCMBuffer) {
        implementation.streamAudioToTranscriber(buffer)
    }

    /// Finalizes any in-flight transcription so a new one can begin.
    func finalizePreviousTranscribing() async throws {
        try await implementation.finalizePreviousTranscribing()
    }

    /// Ends the session entirely and releases backend resources.
    func finishAnalysisSession() async {
        await implementation.finishAnalysisSession()
    }
}

// MARK: - Abstraction shared by the modern and legacy backends
private protocol TranscriberImplementation: AnyObject {
    /// Stream of transcription events produced by the implementation.
    var eventsStream: AsyncThrowingStream<TranscriptionEvent, Error> { get }
    /// Transcribes the audio file at the given URL.
    func transcribeFile(_ fileURL: URL) async throws
    /// Begins a live (microphone) transcription session.
    func startRealTimeTranscription() async throws
    /// Feeds one audio buffer into the active live session.
    func streamAudioToTranscriber(_ buffer: AVAudioPCMBuffer)
    /// Finalizes any in-flight transcription so a new one can begin.
    func finalizePreviousTranscribing() async throws
    /// Tears the session down completely and releases resources.
    func finishAnalysisSession() async
}


#if compiler(>=5.9)
// MARK: - Implementation for iOS 26.0 and later
@available(iOS 26.0, *)
@MainActor
private final class ModernTranscriber: TranscriberImplementation {

    /// Events delivered to the owning `Transcriber` facade.
    let eventsStream: AsyncThrowingStream<TranscriptionEvent, Error>

    private let analyzer: SpeechAnalyzer
    private let transcriber: SpeechTranscriber
    private var resultsTask: Task<Void, Never>?

    /// The locale resolved via `SpeechTranscriber.supportedLocale(equivalentTo:)`.
    private let locale: Locale

    private var bestAvailableAudioFormat: AVAudioFormat?
    private var audioConverter: AVAudioConverter?

    private var inputStream: AsyncStream<AnalyzerInput>?
    private var inputContinuation: AsyncStream<AnalyzerInput>.Continuation?
    private var streamContinuation: AsyncThrowingStream<TranscriptionEvent, Error>.Continuation?

    /// Guards against double-releasing the reserved locale assets.
    private var didReleaseAssets = false

    // State for complete-sentence detection and sentence splitting.
    private var previousText: String = ""
    private var accumulatedText: String = ""          // text buffered until a sentence boundary appears
    private var emittedSentences: Set<String> = []    // sentences already emitted (avoids duplicates)

    /// Builds and prepares a `SpeechAnalyzer`/`SpeechTranscriber` pipeline for
    /// `locale`, downloading the locale's speech assets on demand.
    /// - Throws: `Transcriber._Error.notAvailable`, `.localeNotSupported`,
    ///   or `.missingLocaleAsset` when the assets cannot be installed.
    init(locale: Locale) async throws {
        guard SpeechTranscriber.isAvailable else {
            throw Transcriber._Error.notAvailable
        }

        guard let resolvedLocale = await SpeechTranscriber.supportedLocale(equivalentTo: locale) else {
            throw Transcriber._Error.localeNotSupported
        }

        self.locale = resolvedLocale

        try await AssetInventory.reserve(locale: resolvedLocale)

        var streamContinuation: AsyncThrowingStream<TranscriptionEvent, Error>.Continuation?
        eventsStream = AsyncThrowingStream { continuation in
            streamContinuation = continuation
        }
        self.streamContinuation = streamContinuation

        let preset: SpeechTranscriber.Preset = .timeIndexedProgressiveTranscription
        transcriber = SpeechTranscriber(
            locale: resolvedLocale,
            transcriptionOptions: preset.transcriptionOptions,
            reportingOptions: preset.reportingOptions.union([.alternativeTranscriptions]),
            attributeOptions: preset.attributeOptions.union([.transcriptionConfidence])
        )

        analyzer = SpeechAnalyzer(
            modules: [transcriber],
            options: .init(priority: .userInitiated, modelRetention: .processLifetime)
        )

        bestAvailableAudioFormat = await SpeechAnalyzer.bestAvailableAudioFormat(compatibleWith: [transcriber])
        try await analyzer.prepareToAnalyze(in: bestAvailableAudioFormat, withProgressReadyHandler: nil)

        let installed = (await SpeechTranscriber.installedLocales).contains(resolvedLocale)
        if !installed {
            let downloadingMessage = AttributedString("所选语言语音包尚未安装，正在尝试从 Apple 服务器下载，请保持网络连接。")
            streamContinuation?.yield(TranscriptionEvent(text: downloadingMessage, isFinal: false, confidence: nil))
            do {
                // A nil installation request means the inventory already has
                // everything it needs, so there is nothing to download.
                if let installationRequest = try await AssetInventory.assetInstallationRequest(supporting: [transcriber]) {
                    try await installationRequest.downloadAndInstall()
                }
            } catch {
                let failMessage = AttributedString("语音资源下载失败，请前往“设置 > 通用 > 语言与地区”手动安装后重试。")
                streamContinuation?.yield(TranscriptionEvent(text: failMessage, isFinal: true, confidence: nil))
                throw Transcriber._Error.missingLocaleAsset
            }
            // BUGFIX: the original returned here even after a *successful*
            // download, so `resultsTask` was never created and no transcription
            // events were ever delivered. Fall through so the results task is
            // always set up once the assets are in place.
        }

        resultsTask = Task { [weak self] in
            guard let self else { return }
            do {
                for try await result in transcriber.results {
                    let currentText = String(result.text.characters)

                    if result.isFinal {
                        // Accumulate the finalized text.
                        self.accumulatedText += currentText

                        // Extract any complete sentences from the buffer.
                        let (completeSentences, remainingText) = self.extractCompleteSentences(from: self.accumulatedText)

                        // Emit one event per complete sentence, skipping duplicates.
                        for sentence in completeSentences {
                            if !self.emittedSentences.contains(sentence) {
                                self.emittedSentences.insert(sentence)

                                let attributedSentence = AttributedString(sentence)

                                let event = TranscriptionEvent(
                                    text: attributedSentence,
                                    isFinal: true,
                                    confidence: result.text.transcriptionConfidence,
                                    isCompleteSentence: true
                                )
                                self.streamContinuation?.yield(event)
                            }
                        }

                        // Keep only the not-yet-terminated remainder buffered.
                        self.accumulatedText = remainingText

                        // Forward the unterminated remainder as a volatile event.
                        if !remainingText.isEmpty {
                            let volatileText = AttributedString(remainingText)
                            let volatileEvent = TranscriptionEvent(
                                text: volatileText,
                                isFinal: false,
                                confidence: result.text.transcriptionConfidence,
                                isCompleteSentence: false
                            )
                            self.streamContinuation?.yield(volatileEvent)
                        }

                        self.previousText = currentText
                    } else {
                        // Non-final results are forwarded verbatim as volatile text.
                        let event = TranscriptionEvent(
                            text: result.text,
                            isFinal: false,
                            confidence: result.text.transcriptionConfidence,
                            isCompleteSentence: false
                        )
                        self.streamContinuation?.yield(event)
                    }

                }
                self.streamContinuation?.finish()
            } catch {
                self.streamContinuation?.finish(throwing: error)
            }
        }
    }

    deinit {
        resultsTask?.cancel()
        streamContinuation?.finish()
    }

    /// Transcribes an entire audio file, finalizing any previous session first.
    func transcribeFile(_ fileURL: URL) async throws {
        try await finalizePreviousTranscribing()

        let _ = fileURL.startAccessingSecurityScopedResource()
        defer { fileURL.stopAccessingSecurityScopedResource() }

        let audioFile = try AVAudioFile(forReading: fileURL)
        let cmTime = try await analyzer.analyzeSequence(from: audioFile)
        try await analyzer.finalize(through: cmTime)
    }

    /// Starts a live session fed via `streamAudioToTranscriber(_:)`.
    func startRealTimeTranscription() async throws {
        try await finalizePreviousTranscribing()
        let streams = AsyncStream<AnalyzerInput>.makeStream(of: AnalyzerInput.self)
        inputStream = streams.stream
        inputContinuation = streams.continuation
        try await analyzer.start(inputSequence: streams.stream)
    }

    /// Converts an incoming buffer to the analyzer's preferred format and feeds
    /// it to the active session. Buffers arriving with no active session are ignored.
    func streamAudioToTranscriber(_ buffer: AVAudioPCMBuffer) {
        guard let continuation = inputContinuation else { return }

        let format = bestAvailableAudioFormat ?? buffer.format

        do {
            let convertedBuffer = try convertBuffer(buffer, to: format)
            continuation.yield(AnalyzerInput(buffer: convertedBuffer))
        } catch {
            // BUGFIX: the original yielded the *unconverted* buffer on failure,
            // feeding the analyzer audio in a format it did not request. Drop
            // the buffer instead of risking the whole session.
            print("Failed to convert audio buffer: \(error)")
        }
    }

    /// Ends the current input stream (if any) and finalizes outstanding results.
    func finalizePreviousTranscribing() async throws {
        inputContinuation?.finish()
        inputStream = nil
        inputContinuation = nil
        try await analyzer.finalize(through: nil)
    }

    /// Tears the session down completely and releases the reserved locale assets.
    func finishAnalysisSession() async {
        inputContinuation?.finish()
        inputContinuation = nil
        inputStream = nil
        resultsTask?.cancel()
        resultsTask = nil
        streamContinuation?.finish()

        if !didReleaseAssets {
            await AssetInventory.release(reservedLocale: locale)
            didReleaseAssets = true
        }

        await analyzer.cancelAndFinishNow()
    }

    /// Converts `buffer` to `format`, reusing a cached `AVAudioConverter` when
    /// possible. Returns the input unchanged if it is already in `format`.
    /// - Throws: `Transcriber._Error.audioConverterCreationFailed` or `.failedToConvertBuffer`.
    private func convertBuffer(_ buffer: AVAudioPCMBuffer, to format: AVAudioFormat) throws -> AVAudioPCMBuffer {
        let inputFormat = buffer.format

        guard inputFormat != format else {
            return buffer
        }

        if audioConverter == nil || audioConverter?.outputFormat != format {
            audioConverter = AVAudioConverter(from: inputFormat, to: format)
            audioConverter?.primeMethod = .none
        }

        guard let audioConverter else {
            throw Transcriber._Error.audioConverterCreationFailed
        }

        // Scale capacity by the sample-rate ratio so the output buffer can hold
        // the entire converted input.
        let sampleRateRatio = audioConverter.outputFormat.sampleRate / audioConverter.inputFormat.sampleRate
        let scaledInputFrameLength = Double(buffer.frameLength) * sampleRateRatio
        let frameCapacity = AVAudioFrameCount(scaledInputFrameLength.rounded(.up))

        guard let conversionBuffer = AVAudioPCMBuffer(pcmFormat: audioConverter.outputFormat, frameCapacity: frameCapacity) else {
            throw Transcriber._Error.failedToConvertBuffer("Failed to create AVAudioPCMBuffer.")
        }

        var nsError: NSError?
        var bufferProcessed = false

        // Single-shot input block: hand the buffer over once, then report no more data.
        let status = audioConverter.convert(to: conversionBuffer, error: &nsError) { _, inputStatusPointer in
            defer { bufferProcessed = true }
            inputStatusPointer.pointee = bufferProcessed ? .noDataNow : .haveData
            return bufferProcessed ? nil : buffer
        }

        guard status != .error else {
            throw Transcriber._Error.failedToConvertBuffer(nsError?.localizedDescription)
        }

        return conversionBuffer
    }

    // MARK: - Complete-sentence detection

    /// Heuristic check for whether `currentText` constitutes a complete sentence.
    /// NOTE(review): currently unused within this file — confirm before removing.
    /// - Parameters:
    ///   - currentText: The latest transcription text.
    ///   - previousText: The text from the previous callback.
    ///   - isFinal: Whether the result was reported as final.
    /// - Returns: `true` when the text looks like a complete sentence.
    private func detectCompleteSentenceModern(
        currentText: String,
        previousText: String,
        isFinal: Bool
    ) -> Bool {
        // Empty text is never a complete sentence.
        guard !currentText.isEmpty else { return false }

        // Ends with sentence-final punctuation and carries new content → complete.
        let endPunctuation: Set<Character> = ["。", "！", "？", ".", "!", "?"]
        if let lastChar = currentText.last, endPunctuation.contains(lastChar) {
            if currentText != previousText {
                return true
            }
        }

        // A non-empty final result also counts as complete.
        if isFinal && !currentText.isEmpty {
            return true
        }

        return false
    }

    // MARK: - Sentence splitting

    /// Splits accumulated text into complete sentences at sentence-final
    /// punctuation. Ellipses ("…" and "...") are shielded from being treated
    /// as sentence ends; note ASCII "..." is normalized to "…" in the output.
    /// - Parameter text: The accumulated text.
    /// - Returns: The complete sentences plus any unterminated remainder.
    private func extractCompleteSentences(from text: String) -> (sentences: [String], remainingText: String) {
        var sentences: [String] = []
        var currentSentence = ""

        // Sentence-final punctuation (note: "." also matches decimals/abbreviations).
        let endPunctuation: Set<Character> = ["。", "！", "？", ".", "!", "?"]

        // Private-use-area character as a placeholder that cannot clash with real text.
        let ellipsisPlaceholder = "\u{E000}"

        // Replace ellipses with the placeholder so they are not treated as sentence ends.
        var processedText = text.replacingOccurrences(of: "…", with: ellipsisPlaceholder)
        processedText = processedText.replacingOccurrences(of: "...", with: ellipsisPlaceholder)

        for char in processedText {
            currentSentence.append(char)

            // Sentence-final punctuation closes the current sentence.
            if endPunctuation.contains(char) {
                // Restore placeholders back to an ellipsis.
                let restored = currentSentence.replacingOccurrences(of: ellipsisPlaceholder, with: "…")
                let trimmed = restored.trimmingCharacters(in: .whitespacesAndNewlines)

                if !trimmed.isEmpty {
                    sentences.append(trimmed)
                }
                currentSentence = ""
            }
        }

        // Restore placeholders in the unterminated remainder.
        currentSentence = currentSentence.replacingOccurrences(of: ellipsisPlaceholder, with: "…")
        let remainingText = currentSentence.trimmingCharacters(in: .whitespacesAndNewlines)

        return (sentences: sentences, remainingText: remainingText)
    }
}
#endif

// MARK: - Implementation for iOS versions earlier than 26

//- Uses SFSpeechRecognizer segment data for precise timestamps and pause information
//- Supports bilingual (Chinese/English) recognition
//- Punctuation is applied automatically for both live processing and file transcription
//- Preserves the original confidence information
//
//LegacyTranscriber adds punctuation automatically based on speech pauses and semantics,
//improving the readability of recognition results.

@MainActor
private final class LegacyTranscriber: TranscriberImplementation {

    /// Events delivered to the owning `Transcriber` facade.
    let eventsStream: AsyncThrowingStream<TranscriptionEvent, Error>

    private let recognizer: SFSpeechRecognizer
    private var audioRequest: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private var fileRequest: SFSpeechURLRecognitionRequest?

    private var streamContinuation: AsyncThrowingStream<TranscriptionEvent, Error>.Continuation?
    private var fileContinuation: CheckedContinuation<Void, Error>?
    private var didFinishStream = false
    private var currentFileURL: URL?

    // State for complete-sentence detection and sentence splitting.
    private var previousText: String = ""
    private var lastSegmentEndTime: TimeInterval = 0
    private var accumulatedText: String = ""  // buffered text awaiting sentence boundaries
    private var emittedSentences: Set<String> = []  // sentences already emitted (avoids duplicates)

    // MARK: - Partial-result (live transcription) tuning

    /// The partial text most recently forwarded downstream.
    private var lastPartialText: String = ""

    /// Timestamp of the last forwarded partial result.
    private var lastPartialUpdateTime: TimeInterval = 0

    /// Minimum interval between partial-result updates (seconds).
    private let minPartialUpdateInterval: TimeInterval = 0.15  // 150ms

    /// Minimum confidence threshold for partial results.
    private let minPartialConfidence: Float = 0.3

    /// Minimum partial-result length (characters).
    private let minPartialLength: Int = 2

    // MARK: - Long-running transcription support

    /// When the current recognition task started.
    private var recognitionStartTime: TimeInterval = 0

    /// Maximum duration of a single recognition task (seconds) —
    /// SFSpeechRecognizer limits sessions to roughly one minute.
    private let maxRecognitionDuration: TimeInterval = 55  // 5-second safety margin

    /// Whether a recognition-task restart is currently in progress.
    private var isRestartingRecognition = false

    /// Timer driving automatic task restarts.
    private var restartTimer: Timer?

    /// Maximum number of emitted sentences kept in the dedup cache.
    private let maxEmittedSentencesCache = 100

    /// Maximum number of cached partial results.
    private let maxPartialResultsCache = 20

    /// When the caches were last cleaned.
    private var lastCacheCleanupTime: TimeInterval = 0

    /// Cache-cleanup interval (seconds).
    private let cacheCleanupInterval: TimeInterval = 60  // once a minute

    // MARK: - Audio buffering

    /// Queue of pending audio buffers (prevents dropped audio).
    private var audioBufferQueue: [AVAudioPCMBuffer] = []

    /// Lock protecting `audioBufferQueue`.
    private let bufferQueueLock = NSLock()

    /// Timer that drains the buffer queue.
    private var bufferProcessTimer: Timer?

    /// Cache of partial results (guards against lost recognition output).
    private var partialResultsCache: [String] = []

    /// Maximum buffer-queue size (prevents unbounded memory growth).
    private let maxBufferQueueSize = 100

    /// Buffer-drain interval (seconds).
    private let bufferProcessInterval: TimeInterval = 0.05  // 50ms

    /// Creates a legacy transcriber backed by `SFSpeechRecognizer`.
    /// - Parameter locale: The locale to recognize.
    /// - Throws: `Transcriber._Error.localeNotSupported` when no recognizer
    ///   exists for the locale, `.speechRecognizerUnavailable` when recognition
    ///   is currently unavailable.
    init(locale: Locale) throws {
        guard let speechRecognizer = SFSpeechRecognizer(locale: locale) else {
            throw Transcriber._Error.localeNotSupported
        }
        guard speechRecognizer.isAvailable else {
            throw Transcriber._Error.speechRecognizerUnavailable
        }

        // Log on-device support for diagnostics.
        print("🔍 On-device recognition supported: \(speechRecognizer.supportsOnDeviceRecognition)")

        // `supportsOnDeviceRecognition` is read-only; only the task hint is configurable.
        speechRecognizer.defaultTaskHint = .dictation
        self.recognizer = speechRecognizer

        var continuation: AsyncThrowingStream<TranscriptionEvent, Error>.Continuation?
        eventsStream = AsyncThrowingStream { continuation = $0 }
        streamContinuation = continuation
    }

    deinit {
        // Finish the event stream and stop the buffer/restart timers so no
        // scheduled work outlives this instance.
        streamContinuation?.finish()
        stopBufferProcessing()
        stopAutoRestartTimer()
    }

    /// Transcribes an entire audio file, suspending until recognition finishes
    /// (`handleRecognitionCallback` resumes the stored continuation).
    /// - Parameter fileURL: The audio file to transcribe; accessed as a
    ///   security-scoped resource when possible.
    func transcribeFile(_ fileURL: URL) async throws {
        try ensureRecognizerAvailable()
        try await finalizePreviousTranscribing()

        return try await withCheckedThrowingContinuation { continuation in
            self.fileContinuation = continuation

            let request = SFSpeechURLRecognitionRequest(url: fileURL)
            // Report partial results as they arrive.
            request.shouldReportPartialResults = true
            // BUGFIX: only require on-device recognition when the recognizer
            // actually supports it; forcing it unconditionally makes every
            // request fail on locales/devices without on-device models.
            request.requiresOnDeviceRecognition = self.recognizer.supportsOnDeviceRecognition
            // Dictation-style transcription.
            request.taskHint = .dictation
            // Automatic punctuation.
            request.addsPunctuation = true

            // Track the URL only when security-scoped access was granted, so
            // teardown knows whether to balance with stopAccessing.
            if fileURL.startAccessingSecurityScopedResource() {
                self.currentFileURL = fileURL
            } else {
                self.currentFileURL = nil
            }

            self.fileRequest = request
            self.recognitionTask = self.recognizer.recognitionTask(with: request) { [weak self] result, error in
                self?.handleRecognitionCallback(result: result, error: error, isFileTask: true)
            }
        }
    }

    /// Starts (or restarts) a live recognition session.
    ///
    /// When `isRestartingRecognition` is set, accumulated transcription state
    /// (`accumulatedText`, `emittedSentences`, `previousText`) is preserved so
    /// new results keep appending; otherwise previous state is torn down first.
    func startRealTimeTranscription() async throws {
        try ensureRecognizerAvailable()

        if !isRestartingRecognition {
            try await finalizePreviousTranscribing()
        } else {
            print("🔄 Restarting mode: preserving transcription state...")
        }

        let request = SFSpeechAudioBufferRecognitionRequest()
        // Report partial results as they arrive.
        request.shouldReportPartialResults = true
        // BUGFIX: only require on-device recognition when the recognizer
        // actually supports it; forcing it unconditionally makes recognition
        // fail on locales/devices without on-device models.
        request.requiresOnDeviceRecognition = recognizer.supportsOnDeviceRecognition
        // Dictation-style transcription.
        request.taskHint = .dictation
        // Automatic punctuation.
        request.addsPunctuation = true

        print("🎤 Starting real-time transcription with configuration:")
        print("   - On-device recognition: \(request.requiresOnDeviceRecognition)")
        print("   - Task hint: \(request.taskHint)")
        print("   - Add punctuation: \(request.addsPunctuation)")
        print("   - Partial results: \(request.shouldReportPartialResults)")
        print("   - Recognizer supports on-device: \(recognizer.supportsOnDeviceRecognition)")

        // Contextual keyword boosting (currently disabled):
//        request.contextualStrings = ["微积分", "量子力学", "相对论"]
        audioRequest = request
        recognitionTask = recognizer.recognitionTask(with: request) { [weak self] result, error in
            self?.handleRecognitionCallback(result: result, error: error, isFileTask: false)
        }

        // Log whether the task was created.
        if recognitionTask != nil {
            print("✅ Recognition task created successfully")
        } else {
            print("❌ Failed to create recognition task")
        }

        // Record the wall-clock start time (Date, not AVAudioTime).
        recognitionStartTime = Date().timeIntervalSince1970

        print("⏱️ Recognition start time: \(recognitionStartTime)")

        // Begin draining queued audio buffers into the request.
        startBufferProcessing()

        // Arm the auto-restart timer only on a fresh (non-restart) start.
        if !isRestartingRecognition {
            startAutoRestartTimer()
        }
    }

    /// Enqueues an incoming audio buffer for later forwarding to the
    /// recognition request (the buffer-processing timer drains the queue).
    /// Buffers are dropped once the queue reaches `maxBufferQueueSize`.
    func streamAudioToTranscriber(_ buffer: AVAudioPCMBuffer) {
        bufferQueueLock.lock()
        defer { bufferQueueLock.unlock() }

        guard audioBufferQueue.count < maxBufferQueueSize else {
            // Queue is saturated; drop rather than grow without bound.
            print("⚠️ Warning: Audio buffer queue is full, dropping buffer")
            return
        }
        audioBufferQueue.append(buffer)
    }

    /// Stops and clears any in-flight transcription (file or live) and resets
    /// all buffering, throttling, and sentence-splitting state, resuming a
    /// pending file continuation if one exists.
    func finalizePreviousTranscribing() async throws {
        // Stop draining queued audio buffers.
        stopBufferProcessing()

        // Stop the automatic-restart timer.
        stopAutoRestartTimer()

        // Flush any buffers still queued.
        processRemainingBuffers()

        audioRequest?.endAudio()
        audioRequest = nil

        recognitionTask?.finish()
        recognitionTask?.cancel()
        recognitionTask = nil

        fileRequest = nil
        if let url = currentFileURL {
            url.stopAccessingSecurityScopedResource()
            currentFileURL = nil
        }

        if let fileContinuation {
            self.fileContinuation = nil
            fileContinuation.resume()
        }

        // Reset caches and sentence-splitting state.
        partialResultsCache.removeAll()
        accumulatedText = ""
        emittedSentences.removeAll()

        // Reset partial-result throttling state.
        lastPartialText = ""
        lastPartialUpdateTime = 0

        // Reset long-running-transcription state.
        recognitionStartTime = 0
        isRestartingRecognition = false
    }

    /// Fully tears down the session: stops timers and buffering, ends any
    /// recognition task, resumes a pending file continuation, finishes the
    /// event stream exactly once, and resets all accumulated state.
    func finishAnalysisSession() async {
        // Stop draining queued audio buffers.
        stopBufferProcessing()

        // Stop the automatic-restart timer.
        stopAutoRestartTimer()

        // Flush any buffers still queued.
        processRemainingBuffers()

        audioRequest?.endAudio()
        audioRequest = nil
        recognitionTask?.finish()
        recognitionTask?.cancel()
        recognitionTask = nil
        if let fileContinuation {
            self.fileContinuation = nil
            fileContinuation.resume()
        }
        if let url = currentFileURL {
            url.stopAccessingSecurityScopedResource()
            currentFileURL = nil
        }
        if !didFinishStream {
            streamContinuation?.finish()
            didFinishStream = true
        }

        // Reset caches and sentence-splitting state.
        partialResultsCache.removeAll()
        accumulatedText = ""
        emittedSentences.removeAll()

        // Reset partial-result throttling state.
        lastPartialText = ""
        lastPartialUpdateTime = 0

        // Reset long-running-transcription state.
        recognitionStartTime = 0
        isRestartingRecognition = false
    }

    /// Central callback for both file and live recognition tasks.
    ///
    /// Handles auto-restart of long-running live sessions, error recovery,
    /// sentence extraction/deduplication for final results, and forwarding of
    /// partial results.
    /// - Parameters:
    ///   - result: The recognition result, if any.
    ///   - error: The recognition error, if any.
    ///   - isFileTask: `true` when the callback belongs to a file-transcription task.
    private func handleRecognitionCallback(result: SFSpeechRecognitionResult?, error: Error?, isFileTask: Bool) {
        print("🔔 Recognition callback triggered - isFinal: \(result?.isFinal ?? false), error: \(error?.localizedDescription ?? "none")")

        // Periodically trim caches to avoid unbounded growth.
        cleanupCachesIfNeeded()

        // Check whether the live task has run long enough to need a restart.
        if !isFileTask && !isRestartingRecognition {
            let currentTime = Date().timeIntervalSince1970
            let duration = currentTime - recognitionStartTime
            if duration >= maxRecognitionDuration {
                print("⚠️ Recognition task running for \(Int(duration))s, restarting...")
                Task { @MainActor in
                    await self.restartRecognitionTask()
                }
                return
            }
        }

        if let error {
            // Attempt a restart on network/service errors.
            let nsError = error as NSError
            if nsError.domain == "kAFAssistantErrorDomain" ||
               nsError.code == 216 || nsError.code == 209 {  // common Speech error codes
                print("⚠️ Recognition error: \(error.localizedDescription), attempting restart...")
                Task { @MainActor in
                    await self.restartRecognitionTask()
                }
                return
            }

            // Non-recoverable error: fail the pending continuation and the stream.
            if let fileContinuation {
                self.fileContinuation = nil
                fileContinuation.resume(throwing: error)
            }
            fileRequest = nil
            if let url = currentFileURL {
                url.stopAccessingSecurityScopedResource()
                currentFileURL = nil
            }
            if !didFinishStream {
                streamContinuation?.finish(throwing: error)
                didFinishStream = true
            }
            return
        }

        guard let result else { return }

        let confidence = result.bestTranscription.segments.last?.confidence

        // Final results.
        if result.isFinal {
            // With `request.addsPunctuation = true`, `formattedString` already
            // includes punctuation; otherwise custom punctuation handling applies.
            let transcription = result.bestTranscription
            let rawText = transcription.formattedString

            print("📝 Raw transcription text: \"\(rawText)\"")
            print("📊 Segments count: \(transcription.segments.count)")

            // Nothing to do for an empty result.
            guard !rawText.isEmpty else {
                print("⚠️ Empty transcription result, skipping...")
                return
            }

            // Accumulate into the buffer. On a task restart `accumulatedText`
            // is preserved so new results keep appending.
            accumulatedText += rawText

            print("📚 Accumulated text: \"\(accumulatedText)\"")
            print("📏 Accumulated text length: \(accumulatedText.count) chars")

            // Split the accumulated buffer at sentence-final punctuation.
            let (completeSentences, remainingText) = extractSentencesFromText(accumulatedText)

            print("🔍 Extracted \(completeSentences.count) complete sentences")
            print("📦 Remaining text: \"\(remainingText)\"")

            // Emit one event per complete sentence, skipping duplicates.
            // NOTE(review): the dedup set also suppresses a sentence the user
            // genuinely repeats — confirm that is acceptable.
            for sentence in completeSentences {
                if !emittedSentences.contains(sentence) {
                    emittedSentences.insert(sentence)

                    let attributedSentence = AttributedString(sentence)
                    let event = TranscriptionEvent(
                        text: attributedSentence,
                        isFinal: true,
                        confidence: confidence.map { Double($0) },
                        isCompleteSentence: true
                    )
                    streamContinuation?.yield(event)

                    print("✅ Emitted complete sentence: \"\(sentence)\"")
                }
            }

            // Keep only the unterminated remainder buffered.
            accumulatedText = remainingText

            // Update bookkeeping.
            previousText = rawText
            if let lastSegment = result.bestTranscription.segments.last {
                lastSegmentEndTime = lastSegment.timestamp + lastSegment.duration
            }

            // The finalized content supersedes any cached partial results.
            partialResultsCache.removeAll()
        } else {
            // Partial (non-final) results take the optimized path.
            processPartialResult(result, confidence: confidence)
        }

        if isFileTask, result.isFinal {
            if let fileContinuation {
                self.fileContinuation = nil
                fileContinuation.resume()
            }
            fileRequest = nil
            if let url = currentFileURL {
                url.stopAccessingSecurityScopedResource()
                currentFileURL = nil
            }
        }
    }

    /// Throws (after surfacing a user-facing event) when the recognizer is
    /// not currently available for the configured locale.
    private func ensureRecognizerAvailable() throws {
        if recognizer.isAvailable { return }
        let message = AttributedString("当前设备暂不支持所选语言的语音识别，请检查网络或切换语言后再试。")
        streamContinuation?.yield(TranscriptionEvent(text: message, isFinal: true, confidence: nil))
        throw Transcriber._Error.speechRecognizerUnavailable
    }

    // MARK: - Partial-result handling

    /// Handles a non-final (partial) recognition result: caches it and
    /// forwards it downstream as a volatile transcription event.
    ///
    /// Note: text cleaning, minimum-length/confidence filtering, and
    /// similarity-based debouncing were intentionally disabled in the original
    /// implementation; the raw partial text is forwarded as-is.
    /// - Parameters:
    ///   - result: The partial recognition result.
    ///   - confidence: Confidence of the most recent segment, if any.
    private func processPartialResult(_ result: SFSpeechRecognitionResult, confidence: Float?) {
        let originalText = result.bestTranscription.formattedString

        print("🔄 Processing partial result: \"\(originalText)\" (confidence: \(confidence ?? 0))")

        // Cleaning disabled — forward the raw text.
        let cleanedText = originalText
        let currentTime = Date().timeIntervalSince1970

        // Cache the partial result so it survives a task restart.
        if !cleanedText.isEmpty && !partialResultsCache.contains(cleanedText) {
            partialResultsCache.append(cleanedText)
            // BUGFIX: the cap was hard-coded to 10, ignoring the declared
            // `maxPartialResultsCache` constant (20); use the named constant.
            if partialResultsCache.count > maxPartialResultsCache {
                partialResultsCache.removeFirst()
            }
        }

        // Forward the partial text downstream as a volatile event.
        let attributed = AttributedString(cleanedText)
        let event = TranscriptionEvent(
            text: attributed,
            isFinal: false,
            confidence: confidence.map { Double($0) },
            isCompleteSentence: false
        )
        streamContinuation?.yield(event)

        print("   ✅ Sent partial text: \"\(cleanedText)\"")

        // Update throttling bookkeeping.
        lastPartialText = cleanedText
        lastPartialUpdateTime = currentTime
    }

    /// Normalizes a partial transcription string: trims surrounding
    /// whitespace, collapses whitespace runs into single spaces, and
    /// collapses repeated sentence-ending punctuation into one mark.
    /// - Parameter text: Raw partial text from the recognizer.
    /// - Returns: The normalized text.
    private func cleanPartialText(_ text: String) -> String {
        let trimmed = text.trimmingCharacters(in: .whitespacesAndNewlines)

        // Any run of whitespace becomes a single space.
        let singleSpaced = trimmed.replacingOccurrences(
            of: "\\s+",
            with: " ",
            options: .regularExpression
        )

        // "!!!" → "!", "。。。" → "。", etc.
        return singleSpaced.replacingOccurrences(
            of: "([。！？.!?])\\1+",
            with: "$1",
            options: .regularExpression
        )
    }

    /// Estimates how similar two strings are, on a 0.0–1.0 scale.
    ///
    /// Uses the longest-common-subsequence length divided by the longer
    /// string's length; strings whose lengths differ by more than half of
    /// the longer one are treated as completely dissimilar.
    /// - Parameters:
    ///   - s1: First string.
    ///   - s2: Second string.
    /// - Returns: Similarity from 0.0 (different) to 1.0 (identical).
    private func calculateSimilarity(_ s1: String, _ s2: String) -> Double {
        // Two empty strings are trivially identical.
        if s1.isEmpty && s2.isEmpty { return 1.0 }
        // Exactly one empty string means no overlap at all.
        if s1.isEmpty || s2.isEmpty { return 0.0 }

        let longerLength = max(s1.count, s2.count)

        // Early exit: a large length gap means low similarity anyway.
        guard abs(s1.count - s2.count) <= longerLength / 2 else { return 0.0 }

        // Approximate similarity as the LCS ratio over the longer string.
        return Double(longestCommonSubsequence(s1, s2)) / Double(longerLength)
    }

    /// Computes the length of the longest common subsequence of two strings
    /// with a space-optimized dynamic program (one row kept per iteration).
    /// - Returns: The LCS length; 0 when either string is empty.
    private func longestCommonSubsequence(_ s1: String, _ s2: String) -> Int {
        let lhs = Array(s1)
        let rhs = Array(s2)
        guard !lhs.isEmpty, !rhs.isEmpty else { return 0 }

        // previousRow[j] == LCS(lhs processed so far, rhs[0..<j]).
        var previousRow = [Int](repeating: 0, count: rhs.count + 1)

        for character in lhs {
            var currentRow = [Int](repeating: 0, count: rhs.count + 1)
            for (j, other) in rhs.enumerated() {
                if character == other {
                    currentRow[j + 1] = previousRow[j] + 1
                } else {
                    currentRow[j + 1] = max(previousRow[j + 1], currentRow[j])
                }
            }
            previousRow = currentRow
        }

        return previousRow[rhs.count]
    }

    // MARK: - 长时间转录支持

    /// Restarts the recognition task without clearing any transcription
    /// state (used for long-duration transcription).
    ///
    /// The restart preserves everything already transcribed
    /// (`accumulatedText`, `emittedSentences`, …); new results continue to
    /// be appended after the existing text, with punctuation added
    /// automatically.
    /// NOTE(review): if both restart attempts throw, the task stays stopped
    /// with no event emitted to the stream — confirm callers tolerate that.
    private func restartRecognitionTask() async {
        guard !isRestartingRecognition else { return }

        isRestartingRecognition = true

        // Snapshot the current transcription state (used for logging only).
        let currentAccumulatedLength = accumulatedText.count
        let currentSentenceCount = emittedSentences.count

        print("🔄 Restarting recognition task for long-duration transcription...")
        print("📊 Current state - Accumulated text: \(currentAccumulatedLength) chars, Sentences: \(currentSentenceCount)")

        // 1. Finish the current recognition task WITHOUT clearing any state.
        //    Deliberately does not call finalizePreviousTranscribing(), so
        //    accumulatedText and emittedSentences are preserved.
        recognitionTask?.finish()
        recognitionTask = nil

        // 2. End only the audio request (the buffer queue is left intact).
        audioRequest?.endAudio()
        audioRequest = nil

        // 3. Short delay so the system can deliver the final results of the
        //    task that just ended.
        try? await Task.sleep(nanoseconds: 300_000_000)  // 300 ms

        // 4. Restart recognition. startRealTimeTranscription checks the
        //    isRestartingRecognition flag and therefore does not reset state.
        do {
            try await startRealTimeTranscription()
            print("✅ Recognition task restarted successfully")
            print("📊 State preserved - Accumulated text: \(accumulatedText.count) chars, Sentences: \(emittedSentences.count)")
        } catch {
            print("❌ Failed to restart recognition task: \(error)")
            // One retry after a longer pause before giving up.
            try? await Task.sleep(nanoseconds: 500_000_000)  // 500 ms
            do {
                try await startRealTimeTranscription()
                print("✅ Recognition task reinitialized")
            } catch {
                print("❌ Failed to reinitialize recognition: \(error)")
            }
        }

        isRestartingRecognition = false
    }

    /// Periodically trims the sentence, partial-result, and audio-buffer
    /// caches to bound memory growth; runs at most once per
    /// `cacheCleanupInterval` seconds.
    private func cleanupCachesIfNeeded() {
        let currentTime = Date().timeIntervalSince1970
        let timeSinceLastCleanup = currentTime - lastCacheCleanupTime

        // Throttle: skip until the cleanup interval has elapsed.
        guard timeSinceLastCleanup >= cacheCleanupInterval else { return }

        lastCacheCleanupTime = currentTime

        // Trim emittedSentences down toward half the cap.
        // NOTE(review): emittedSentences is a Set, so Array(emittedSentences)
        // has no defined order — removeFirst drops ARBITRARY sentences, not
        // the oldest ones as originally claimed. If recency matters, an
        // ordered structure is needed; confirm intent.
        if emittedSentences.count > maxEmittedSentencesCache {
            let excessCount = emittedSentences.count - maxEmittedSentencesCache / 2
            // Convert to an array and drop elements from the front.
            var sentences = Array(emittedSentences)
            sentences.removeFirst(excessCount)
            emittedSentences = Set(sentences)
            print("🧹 Cleaned up \(excessCount) old sentences from cache")
        }

        // Trim partialResultsCache (an ordered array: oldest entries first).
        if partialResultsCache.count > maxPartialResultsCache {
            let excessCount = partialResultsCache.count - maxPartialResultsCache / 2
            partialResultsCache.removeFirst(excessCount)
            print("🧹 Cleaned up \(excessCount) partial results from cache")
        }

        // Drop queued audio buffers if the queue is near capacity.
        // NOTE(review): dropped buffers are discarded audio — confirm the
        // loss is acceptable when the recognizer falls behind.
        bufferQueueLock.lock()
        if audioBufferQueue.count > maxBufferQueueSize * 4 / 5 {  // 80% full
            let excessCount = audioBufferQueue.count - maxBufferQueueSize / 2
            audioBufferQueue.removeFirst(excessCount)
            print("⚠️ Audio buffer queue was 80% full, cleaned up \(excessCount) buffers")
        }
        bufferQueueLock.unlock()
    }

    /// Starts (or restarts) the repeating timer that triggers
    /// `restartRecognitionTask()` every `maxRecognitionDuration` seconds,
    /// keeping long transcription sessions alive.
    private func startAutoRestartTimer() {
        // Ensure at most one auto-restart timer exists.
        stopAutoRestartTimer()

        let timer = Timer.scheduledTimer(
            withTimeInterval: maxRecognitionDuration,
            repeats: true
        ) { [weak self] _ in
            guard let self else { return }
            // Hop onto the main actor before touching actor-isolated state.
            Task { @MainActor in
                await self.restartRecognitionTask()
            }
        }
        restartTimer = timer

        // Register in .common so the timer keeps firing during run-loop
        // tracking modes (e.g. while the user is scrolling).
        RunLoop.main.add(timer, forMode: .common)
        print("⏱️ Auto-restart timer started (interval: \(Int(maxRecognitionDuration))s)")
    }

    /// Invalidates and clears the auto-restart timer. Safe to call when no
    /// timer is running; assumes execution on the main actor.
    nonisolated private func stopAutoRestartTimer() {
        MainActor.assumeIsolated {
            guard let timer = restartTimer else { return }
            timer.invalidate()
            restartTimer = nil
        }
    }

    // MARK: - 句子拆分

    /// Splits accumulated text into complete sentences at sentence-ending
    /// punctuation (。！？.!?), protecting ellipses ("…" and "...") from
    /// being misread as sentence boundaries.
    ///
    /// Fix: the original mapped both "…" and "..." onto a single
    /// placeholder, so a literal "..." in the input was silently rewritten
    /// to "…" on output. Two placeholders now preserve the original
    /// spelling.
    /// - Parameter text: The accumulated text to split.
    /// - Returns: A tuple of the complete sentences (trimmed) and the
    ///   trailing text that does not yet end a sentence.
    private func extractSentencesFromText(_ text: String) -> (sentences: [String], remainingText: String) {
        var sentences: [String] = []
        var currentSentence = ""

        // Sentence-ending punctuation (CJK and Latin).
        let endPunctuation: Set<Character> = ["。", "！", "？", ".", "!", "?"]

        // Hide ellipses behind private-use placeholders so their dots are
        // not treated as sentence ends. "…" must be replaced before "..."
        // so each form keeps its own placeholder.
        let ellipsisPlaceholder = "\u{E000}"   // stands in for "…"
        let threeDotsPlaceholder = "\u{E001}"  // stands in for "..."
        var processedText = text.replacingOccurrences(of: "…", with: ellipsisPlaceholder)
        processedText = processedText.replacingOccurrences(of: "...", with: threeDotsPlaceholder)

        // Restores both placeholders to their original ellipsis spellings.
        func restorePlaceholders(_ s: String) -> String {
            s.replacingOccurrences(of: ellipsisPlaceholder, with: "…")
                .replacingOccurrences(of: threeDotsPlaceholder, with: "...")
        }

        for char in processedText {
            currentSentence.append(char)

            // A sentence-ending mark closes the current sentence.
            if endPunctuation.contains(char) {
                let trimmed = restorePlaceholders(currentSentence)
                    .trimmingCharacters(in: .whitespacesAndNewlines)

                if !trimmed.isEmpty {
                    sentences.append(trimmed)
                }
                currentSentence = ""
            }
        }

        // Whatever is left is an unfinished sentence.
        let remainingText = restorePlaceholders(currentSentence)
            .trimmingCharacters(in: .whitespacesAndNewlines)

        return (sentences: sentences, remainingText: remainingText)
    }

    // MARK: - 标点符号处理

    /// Inserts heuristic Chinese punctuation into a transcription based on
    /// inter-word pause length: long pauses (> 0.8 s) become 。, medium
    /// pauses (> 0.4 s) become ，, and the final word's tone particles pick
    /// the closing ？/！/。 mark.
    /// - Parameter transcription: The recognizer transcription to punctuate.
    /// - Returns: The punctuated text, or the plain formatted string when
    ///   there are no segments.
    private func processPunctuation(from transcription: SFTranscription) -> String {
        let segments = transcription.segments
        guard let firstSegment = segments.first else {
            return transcription.formattedString
        }

        // The first word goes in verbatim; remember where it ends.
        var output = firstSegment.substring
        var lastEnd = firstSegment.timestamp + firstSegment.duration

        // For each following word, the gap since the previous word decides
        // which punctuation (if any) precedes it.
        for segment in segments.dropFirst() {
            let gap = segment.timestamp - lastEnd
            if gap > 0.8 {
                // Long pause — end the clause with a full stop.
                output.append("。")
            } else if gap > 0.4 {
                // Medium pause — comma.
                output.append("，")
            }
            // Short or no pause — words join directly.
            output.append(segment.substring)
            lastEnd = segment.timestamp + segment.duration
        }

        // Choose a closing mark from the last word's tone.
        if let lastWord = segments.last?.substring {
            let questionSuffixes = ["吗", "呢", "啊", "吧"]
            let questionFragments = ["什么", "为什么", "怎么", "哪"]
            let soundsLikeQuestion = questionSuffixes.contains { lastWord.hasSuffix($0) }
                || questionFragments.contains { lastWord.contains($0) }

            if soundsLikeQuestion {
                // Interrogative particle or question word → question mark.
                if !output.hasSuffix("？") {
                    output.append("？")
                }
            } else if lastWord.hasSuffix("啦") || lastWord.hasSuffix("哇") {
                // Exclamatory particle → exclamation mark.
                if !output.hasSuffix("！") {
                    output.append("！")
                }
            } else if !output.hasSuffix("。") && !output.hasSuffix("，") {
                // Plain statement → full stop.
                output.append("。")
            }
        }

        return output
    }

    /// Inserts heuristic English punctuation based on inter-word pauses:
    /// long pauses (> 0.8 s) end a sentence (". " plus a capitalized next
    /// word), medium pauses (> 0.4 s) insert ", ", and otherwise words are
    /// joined with spaces. A trailing "?" or "." is chosen from simple
    /// question heuristics on the first/last words.
    ///
    /// Fix: the original capitalized the word after a long pause inline AND
    /// left `shouldCapitalize` set to true, so the word after THAT one was
    /// also capitalized incorrectly. Capitalization is now driven by the
    /// flag alone and applied exactly once per sentence start.
    /// - Parameter transcription: The recognizer transcription to punctuate.
    /// - Returns: The punctuated text, or the plain formatted string when
    ///   there are no segments.
    private func processPunctuationForEnglish(from transcription: SFTranscription) -> String {
        let segments = transcription.segments
        guard !segments.isEmpty else {
            return transcription.formattedString
        }

        var result = ""
        var previousEndTime: TimeInterval = 0
        // The very first word, and the first word of each new sentence,
        // gets capitalized.
        var shouldCapitalize = true

        for (index, segment) in segments.enumerated() {
            var substring = segment.substring
            let startTime = segment.timestamp
            let duration = segment.duration

            // Gap since the previous word decides the separator.
            let pauseDuration = startTime - previousEndTime

            if index > 0 {
                if pauseDuration > 0.8 {
                    // Long pause — close the sentence; the next word starts
                    // a new one.
                    result.append(". ")
                    shouldCapitalize = true
                } else if pauseDuration > 0.4 {
                    // Medium pause — comma.
                    result.append(", ")
                } else {
                    // Short pause — plain word separator.
                    result.append(" ")
                }
            }

            // Capitalize exactly one word per sentence start.
            if shouldCapitalize {
                substring = substring.prefix(1).uppercased() + substring.dropFirst()
                shouldCapitalize = false
            }

            result.append(substring)
            previousEndTime = startTime + duration
        }

        // Trailing punctuation: crude question detection, else a period.
        if let lastSegment = segments.last {
            let lastWord = lastSegment.substring.lowercased()

            // Leading wh-word, or a trailing "you"/"it", suggests a question.
            let questionWords = ["what", "where", "when", "why", "who", "how", "which", "whose", "whom"]
            let firstWord = segments.first?.substring.lowercased() ?? ""

            if questionWords.contains(firstWord) || lastWord == "you" || lastWord == "it" {
                if !result.hasSuffix("?") {
                    result.append("?")
                }
            } else if !result.hasSuffix(".") && !result.hasSuffix(",") {
                result.append(".")
            }
        }

        return result
    }

    /// Routes a transcription to the Chinese or English punctuation pass,
    /// deciding the language by whether the text contains any Han character.
    /// - Parameter transcription: The transcription to punctuate.
    /// - Returns: The punctuated text.
    private func addSmartPunctuation(to transcription: SFTranscription) -> String {
        // Simple language sniff: one Han character ⇒ treat as Chinese.
        let containsHan = transcription.formattedString
            .range(of: "\\p{Han}", options: .regularExpression) != nil

        return containsHan
            ? processPunctuation(from: transcription)
            : processPunctuationForEnglish(from: transcription)
    }

    // MARK: - 缓冲处理方法

    /// Starts the repeating timer that drains the queued audio buffers into
    /// the active recognition request.
    private func startBufferProcessing() {
        // Never run two drain timers concurrently.
        stopBufferProcessing()

        let timer = Timer.scheduledTimer(
            withTimeInterval: bufferProcessInterval,
            repeats: true
        ) { [weak self] _ in
            self?.processAudioBuffers()
        }
        bufferProcessTimer = timer

        // Register in .common so the timer keeps firing during run-loop
        // tracking modes (e.g. while the user is scrolling).
        RunLoop.main.add(timer, forMode: .common)
    }

    /// Invalidates and clears the buffer-processing timer; safe to call
    /// repeatedly. Assumes execution on the main actor.
    nonisolated private func stopBufferProcessing() {
        MainActor.assumeIsolated {
            guard let timer = bufferProcessTimer else { return }
            timer.invalidate()
            bufferProcessTimer = nil
        }
    }

    /// 处理音频缓冲队列（定时器回调）
    @objc private func processAudioBuffers() {
        bufferQueueLock.lock()
        defer { bufferQueueLock.unlock() }

        guard !audioBufferQueue.isEmpty else { return }
        guard let audioRequest = audioRequest else {
            print("⚠️ Audio request is nil, cannot process buffers")
            return
        }

        // 批量处理缓冲，提高效率
        let batchSize = min(audioBufferQueue.count, 5)  // 每次最多处理 5 个缓冲
        let buffersToProcess = Array(audioBufferQueue.prefix(batchSize))
        audioBufferQueue.removeFirst(batchSize)

        // 在锁外处理，避免阻塞
        for buffer in buffersToProcess {
            audioRequest.append(buffer)
        }

        // 每隔一段时间打印一次日志（避免刷屏）
        processCount += 1
        if processCount % 20 == 0 {  // 每处理 20 次打印一次
            print("🎵 Processed \(processCount * batchSize) audio buffers, queue size: \(audioBufferQueue.count)")
        }
    }

    /// Flushes every queued audio buffer into the recognition request; call
    /// when transcription is stopping.
    ///
    /// Fix: the original drained the queue BEFORE checking `audioRequest`,
    /// so when the request was already nil the trailing audio was silently
    /// discarded. The request is now checked first and the queue is left
    /// intact (with a warning) when it is gone.
    private func processRemainingBuffers() {
        // audioRequest and the queue bookkeeping live on the main actor.
        MainActor.assumeIsolated {
            // Without a live request the buffers cannot be delivered; keep
            // them queued rather than dropping trailing audio.
            guard let audioRequest = self.audioRequest else {
                print("⚠️ Audio request is nil, keeping remaining buffers queued")
                return
            }

            var remainingBuffers: [AVAudioPCMBuffer] = []
            self.bufferQueueLock.lock()
            remainingBuffers = self.audioBufferQueue
            self.audioBufferQueue.removeAll()
            self.bufferQueueLock.unlock()

            guard !remainingBuffers.isEmpty else { return }

            // Hand everything still queued to the recognizer.
            for buffer in remainingBuffers {
                audioRequest.append(buffer)
            }

            print("✅ Processed \(remainingBuffers.count) remaining audio buffers")
        }
    }
}
