//
//  SpeechAnalyzeManager.swift
//  SpeechAnalyzerDemo
//
//  Migrated to a UIKit-friendly observable controller; replaces the former SwiftUI bindings with Combine publishers.
//

import Combine
import Speech
import AVFAudio

// MARK: - Static helpers
extension SpeechAnalyzeManager {
    /// Locale used for transcription when the caller does not supply one.
    /// Built with the component-based `Locale` initializer (iOS 16+);
    /// equivalent to `Locale(identifier: "en_US")`.
    static let defaultLocale = Locale(
        languageCode: .english,
        script: nil,
        languageRegion: .unitedStates
    )
}

// MARK: - Error mapping
extension SpeechAnalyzeManager {
    /// Errors raised while setting up the audio/transcription pipeline.
    /// Conforms to `LocalizedError` so `localizedDescription` surfaces the
    /// same text as `message` (plain `Error` would lose it).
    enum _Error: LocalizedError {
        case failToCreateAudioCapturer
        case failToCreateTranscriber

        /// Human-readable description of the failure.
        var message: String {
            switch self {
            case .failToCreateAudioCapturer:
                return "Failed to setup Audio Engine."
            case .failToCreateTranscriber:
                return "Failed to set up speech analyzer."
            }
        }

        /// `LocalizedError` requirement; mirrors `message`.
        var errorDescription: String? { message }
    }
}

// MARK: - Main implementation
@MainActor
final class SpeechAnalyzeManager: ObservableObject {

    // Published properties replace the former SwiftUI observation model so
    // UIKit clients can subscribe through Combine.

    /// Most recent pipeline error. Assigning a non-nil value logs it,
    /// raises `showError`, and clears the setup flag.
    @Published var error: Error? {
        didSet {
            guard let error else { return }
            // NOTE(review): `message` is not declared on `Swift.Error`;
            // this presumably relies on a project-wide `Error` extension —
            // verify it exists elsewhere in the project.
            print(error.message)
            showError = true
            isSettingUp = false
        }
    }

    /// Drives error presentation; resetting it to `false` clears `error`.
    @Published var showError: Bool = false {
        didSet {
            if showError == false {
                error = nil
            }
        }
    }

    /// True while the transcriber / audio pipeline is being (re)configured.
    @Published private(set) var isSettingUp: Bool = false

    /// Locale currently used for transcription.
    @Published private(set) var locale: Locale

    /// In-progress (not yet finalized) transcript text.
    @Published private(set) var volatileTranscript: String = ""

    /// Accumulated finalized transcript text.
    @Published private(set) var finalizedTranscript: String = ""

    /// Confidence of the most recent finalized result, when available.
    @Published private(set) var transcriptionConfidence: Double?

    /// True while a file or real-time transcription is running.
    @Published private(set) var isTranscribing: Bool = false

    /// Complete-sentence event (used for bullet-style caption display).
    @Published private(set) var completeSentence: (text: String, confidence: Double?)?

    /// Audio engine state; transitions maintain the capture start time and
    /// clear published input events when capture stops.
    @Published private(set) var audioCapturerState: AudioCapturer.EngineState = .stopped {
        didSet {
            switch audioCapturerState {
            case .started:
                audioCapturingStartTime = AVAudioTime.machineTimeSeconds
            case .stopped:
                audioCapturingStartTime = nil
                audioInputEvents = nil
            case .paused:
                break
            }
        }
    }

    /// Latest input power levels plus elapsed time since capture started.
    @Published private(set) var audioInputEvents: ([PowerLevel], ElapsedTime)?

    // Host time (seconds) at which the current capture session began.
    private var audioCapturingStartTime: TimeInterval?

    // Converts captured audio into text.
    private var transcriber: Transcriber?

    // Captures the microphone input stream.
    private var audioCapturer: AudioCapturer?

    // Long-lived task consuming transcription results.
    private var transcriptionResultsTask: Task<Void, Error>?

    // Long-lived task consuming microphone buffers.
    private var audioInputTask: Task<Void, Error>?

    /// Kicks off asynchronous setup of the transcriber and audio capturer;
    /// failures surface through the published `error`.
    init(locale: Locale = SpeechAnalyzeManager.defaultLocale) {
        self.isSettingUp = true
        self.locale = locale

        Task { @MainActor [weak self] in
            guard let self else { return }
            do {
                try await self.setupTranscriber(locale: locale)
                try self.setupAudioCapturer()
                self.isSettingUp = false
            } catch {
                self.error = error
            }
        }
    }

    deinit {
        transcriptionResultsTask?.cancel()
        audioInputTask?.cancel()

        // Capture the transcriber itself: a `[weak self]` capture here is
        // already nil by the time the task runs (self is mid-deallocation),
        // so the analysis session would never actually be finished.
        Task { [transcriber] in
            await transcriber?.finishAnalysisSession()
        }
    }
}

// MARK: - Transcription operations
extension SpeechAnalyzeManager {

    /// Transcribes the audio file at `fileURL` from start to finish.
    /// No-op while another transcription is already running.
    /// - Throws: `_Error.failToCreateTranscriber` when setup never produced
    ///   a transcriber, or any error from the transcriber itself.
    func transcribeFile(_ fileURL: URL) async throws {
        guard let transcriber else {
            throw _Error.failToCreateTranscriber
        }
        guard isTranscribing == false else { return }

        resetTranscripts()
        isTranscribing = true

        // Clear the flag on every exit path, including a throwing one.
        defer {
            isTranscribing = false
        }

        try await transcriber.transcribeFile(fileURL)
    }

    /// Starts microphone capture followed by live transcription.
    /// No-op if already transcribing or the capturer is not stopped.
    func startRealTimeTranscription() async throws {
        guard isTranscribing == false else { return }
        guard audioCapturerState == .stopped else { return }

        guard let transcriber else {
            throw _Error.failToCreateTranscriber
        }

        guard let audioCapturer else {
            throw _Error.failToCreateAudioCapturer
        }

        try await audioCapturer.startCapturingInput()

        do {
            try await transcriber.startRealTimeTranscription()
        } catch {
            // Roll back the already-started capture so the audio engine is
            // not left running while published state still says `.stopped`.
            audioCapturer.stopCapturing()
            throw error
        }

        resetTranscripts()
        isTranscribing = true
        audioCapturerState = .started
    }

    /// Pauses capture without finalizing the transcription session.
    func pauseRealTimeTranscription() {
        audioCapturerState = .paused
        audioCapturer?.pauseCapturing()
    }

    /// Resumes capture after `pauseRealTimeTranscription()`.
    func resumeRealTimeTranscription() throws {
        try audioCapturer?.resumeCapturing()
        audioCapturerState = .started
    }

    /// Stops capture and finalizes any pending transcription.
    func stopTranscription() async throws {
        audioCapturer?.stopCapturing()
        // The capturer is already stopped at this point, so keep the
        // published state consistent even if finalization throws.
        defer {
            audioCapturerState = .stopped
            isTranscribing = false
        }
        try await transcriber?.finalizePreviousTranscribing()
    }
}

// MARK: - Locale management
extension SpeechAnalyzeManager {

    /// Tears down the current transcriber and rebuilds it for `locale`.
    /// No-op when the locale is unchanged or a transcription is running.
    /// - Throws: any error raised while creating the new transcriber.
    func updateLocale(_ locale: Locale) async throws {
        guard locale != self.locale else { return }
        guard isTranscribing == false else { return }

        isSettingUp = true
        // Clear the setup flag on every exit path; previously it stayed
        // stuck at `true` when `setupTranscriber(locale:)` threw.
        defer { isSettingUp = false }

        resetTranscripts()
        self.locale = locale

        await transcriber?.finishAnalysisSession()
        transcriber = nil

        transcriptionResultsTask?.cancel()
        transcriptionResultsTask = nil

        try await setupTranscriber(locale: locale)
    }
}

// MARK: - Private helpers
private extension SpeechAnalyzeManager {

    /// Creates the audio capturer and starts a long-lived task that forwards
    /// input buffers to the transcriber and publishes power-level events.
    func setupAudioCapturer() throws {
        audioCapturer = try AudioCapturer()

        audioInputTask = Task { @MainActor [weak self] in
            // Hold the capturer (not `self`) for the life of the loop so the
            // manager can deinit — and cancel this task — while audio is
            // still flowing. The previous `guard let self` at the top held
            // self strongly, creating a retain cycle (self -> stored task ->
            // strong self) that prevented deinit from ever running.
            guard let audioCapturer = self?.audioCapturer else { return }

            for await (buffer, time) in audioCapturer.inputTapEventsStream {
                // Re-acquire `self` per iteration; stop once deallocated.
                guard let self else { return }

                if self.audioCapturerState == .started {
                    self.transcriber?.streamAudioToTranscriber(buffer)

                    if let startTime = self.audioCapturingStartTime {
                        self.audioInputEvents = (buffer.powerLevel, time.seconds - startTime)
                    }
                }
            }
        }
    }

    /// Creates the transcriber for `locale` and starts a long-lived task
    /// that folds its transcription events into the published state.
    func setupTranscriber(locale: Locale) async throws {
        transcriber = try await Transcriber(locale: locale)

        transcriptionResultsTask = Task { @MainActor [weak self] in
            // Hold the transcriber (not `self`) across the loop — see
            // setupAudioCapturer() for the retain-cycle rationale.
            guard let transcriber = self?.transcriber else { return }

            do {
                for try await event in transcriber.transcriptionEvents {
                    guard let self else { return }

                    let text = String(event.text.characters)
                    if event.isFinal {
                        // Publish complete sentences separately so they can
                        // be shown as bullet-style captions.
                        if event.isCompleteSentence && !text.isEmpty {
                            self.completeSentence = (text: text, confidence: event.confidence)
                        }

                        self.finalizedTranscript.append(text)

                        // Keep the last known confidence when the event
                        // carries none (the old code re-assigned the same
                        // value, triggering a redundant publish).
                        if let confidence = event.confidence {
                            self.transcriptionConfidence = confidence
                        }

                        self.volatileTranscript = ""
                    } else {
                        self.volatileTranscript = text
                    }
                }
            } catch {
                if error is CancellationError {
                    return
                }

                guard let self else { return }
                self.error = error

                if self.isTranscribing {
                    try await self.stopTranscription()
                }
            }
        }
    }
}
}

// MARK: - Public helpers
extension SpeechAnalyzeManager {
    /// Resets every published transcript value back to its initial state.
    func resetTranscripts() {
        transcriptionConfidence = nil
        finalizedTranscript = ""
        volatileTranscript = ""
    }
}
