//
//  SpeechRcgnzr.swift
//  DentalFormula
//
//  Created by Алексей Малашин on 26.09.2023.
//

import Foundation
import Speech
import SwiftUI

/// Failures that can occur while setting up or running speech recognition.
///
/// Conforms to `LocalizedError` so that call sites which log
/// `error.localizedDescription` (see the `catch` blocks in `SpeechRecognizer`)
/// surface the human-readable `message` instead of a generic
/// "The operation couldn't be completed" description.
enum RecognizerError: Error, LocalizedError {
    case nilRecognizer
    case notAuthorizedToRecognize
    case notPermittedToRecord
    case recognizerIsUnavailable
    case cantCreateAudioBuffer
    case unableUseAppDir
    
    /// Human-readable description of the failure.
    var message: String {
        switch self {
        case .nilRecognizer: return "Can't initialize speech recognizer"
        case .notAuthorizedToRecognize: return "Not authorized to recognize speech"
        case .notPermittedToRecord: return "Not permitted to record audio"
        case .recognizerIsUnavailable: return "Recognizer is unavailable"
        case .cantCreateAudioBuffer: return "Unable to create a SFSpeechAudioBufferRecognitionRequest object"
        case .unableUseAppDir: return "Unable to create Application Support directory to store generated language model"
        }
    }
    
    /// `LocalizedError` requirement: route `localizedDescription` through `message`.
    var errorDescription: String? { message }
}

/// Wraps `SFSpeechRecognizer` + `AVAudioEngine` to provide live dictation:
/// every (partial) transcription is delivered to a caller-supplied
/// `textHandler` closure as the user speaks. On iOS 17+ a custom language
/// model (prepared from bundled training data in `init`) biases recognition
/// toward the app's vocabulary.
///
/// Declared as an `actor` so its mutable audio/recognition state is
/// serialized; `ObservableObject` conformance lets SwiftUI views own an
/// instance (no `@Published` properties are exposed, so this type does not
/// itself drive view invalidation).
actor SpeechRecognizer: ObservableObject {
    // Captures microphone audio and feeds PCM buffers into the recognition request.
    private var audioEngine = AVAudioEngine()
    // nil when SFSpeechRecognizer does not support the requested locale on this device.
    private let speechRecognizer: SFSpeechRecognizer?// = SFSpeechRecognizer(locale: Locale(identifier: language))
    // Live request/task for the current dictation session; nil when idle.
    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?

    // App language code ("ru", "en", …) from the bundle's preferred localization.
    private let languageId = Bundle.main.preferredLocalizations[0]
    // Recognition locale derived from the app language; any non-Russian localization falls back to US English.
    private let localeId = Bundle.main.preferredLocalizations[0] == "ru" ? "ru_RU" : "en_US"
    
    /// Application Support subdirectory ("…/Application Support/DentalFormula")
    /// that holds the generated custom language model and vocabulary.
    /// NOTE(review): both `try!` calls crash the app if the directory cannot
    /// be created, and `RecognizerError.unableUseAppDir` is declared but never
    /// thrown — consider converting this into a throwing accessor that throws it.
    private var appDataDir: URL {
        let appSupportDir = try! URL(for: .applicationSupportDirectory, in: .userDomainMask, appropriateFor: nil, create: true)
        // let cachesDir = appDataDir//FileManager.default.urls(for: .cachesDirectory, in: .userDomainMask).first!
        let appDataDir = appSupportDir.appendingPathComponent("DentalFormula", conformingTo: .directory)
        try! FileManager.default.createDirectory(at: appDataDir, withIntermediateDirectories: true)
        return appDataDir
    }
    
    /// Custom-language-model configuration pointing at the on-disk model and
    /// vocabulary produced by `prepareCustomLanguageModel` in `init`.
    @available(iOS 17, *)
    private var lmConfiguration: SFSpeechLanguageModel.Configuration {
        let dynamicLanguageModel = appDataDir.appendingPathComponent("df-language-model")
        let dynamicVocabulary = appDataDir.appendingPathComponent("df-vocabulary")
//        print(dynamicLanguageModel.absoluteString)
//        print(dynamicVocabulary.absoluteString)
        return SFSpeechLanguageModel.Configuration(languageModel: dynamicLanguageModel, vocabulary: dynamicVocabulary)
    }

    /// Creates the recognizer for the app's locale, then kicks off a detached
    /// task that checks speech/microphone authorization and (on iOS 17+)
    /// prepares the custom language model from the bundled "LM Data" resource.
    /// Failures are logged, not surfaced — the actor is still usable afterwards,
    /// but `start` will throw later if the recognizer is unavailable.
    init() {
        print("SpeechRecognizer: Language = \(languageId)")
        print("SpeechRecognizer: Locale = \(localeId)")
        speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: localeId))
        
        // Detached so the (potentially slow) authorization prompts and model
        // preparation don't block whoever is constructing this actor.
        Task.detached {
            do {
                guard self.speechRecognizer != nil else { throw RecognizerError.nilRecognizer }
                guard await SFSpeechRecognizer.hasAuthorizationToRecognize() else { throw RecognizerError.notAuthorizedToRecognize }
                guard await AVAudioSession.sharedInstance().hasPermissionToRecord() else { throw RecognizerError.notPermittedToRecord }
                if #available(iOS 17, *) {
//                    let locale = Locale(identifier: self.localeId)
//                    let lmTrainDataFile = /*outputDir*/await self.appDataDir//.appendingPathComponent("df-train-data.bin")
                    // NOTE(review): force-unwrap crashes if the per-language training
                    // file is missing from the bundle — confirm all localizations ship one.
                    let lmDataPath = Bundle.main.path(forResource: "lm-data-\(self.languageId)", ofType: "", inDirectory: "LM Data")!
                    let lmDataUrl = URL(fileURLWithPath: lmDataPath)
                    print("SpeechRecognizer: LM Data File = \(lmDataUrl.absoluteString)")
                    do {
//                        print("LM Model: \(await self.lmConfiguration.languageModel.absoluteString)")
//                        print("LM Vocab: \(await self.lmConfiguration.vocabulary?.absoluteString)")
                        try await SFSpeechLanguageModel.prepareCustomLanguageModel(for: lmDataUrl, clientIdentifier: "in.malash.DentalFormula", configuration: self.lmConfiguration) // prepare custom model
                    } catch {
                        print("SpeechRecognizer: \(error.localizedDescription)")
                    }
                }
            } catch {
                print("SpeechRecognizer: \(error.localizedDescription)")
            }
        }
    }

    /// Configures the shared audio session, wires the microphone into a fresh
    /// `SFSpeechAudioBufferRecognitionRequest`, and starts a recognition task
    /// that forwards each transcription update to `textHandler`.
    ///
    /// - Parameter textHandler: Receives `bestTranscription.formattedString`
    ///   for every partial result, plus an initial localized "start talking" prompt.
    /// - Throws: `RecognizerError.recognizerIsUnavailable` or
    ///   `.cantCreateAudioBuffer`, or any `AVAudioSession`/`AVAudioEngine` error.
    private func start(textHandler: @escaping (String) -> Void) throws {
        guard let speechRecognizer, speechRecognizer.isAvailable else { throw RecognizerError.recognizerIsUnavailable }
        
        // Cancel any previous task if any.
        if let recognitionTask = recognitionTask {
            recognitionTask.cancel()
            self.recognitionTask = nil
        }
        
        // Configure the audio session for the app.
        let audioSession = AVAudioSession.sharedInstance()
        try audioSession.setCategory(.record, mode: .measurement, options: .duckOthers)
        try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
        let inputNode = audioEngine.inputNode
        
        // Create and configure the speech recognition request.
        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
        guard let recognitionRequest = recognitionRequest else { throw RecognizerError.cantCreateAudioBuffer }
        recognitionRequest.shouldReportPartialResults = true
        if #available(iOS 13, *) { // Keep speech recognition data on the device
            recognitionRequest.requiresOnDeviceRecognition = true
            if #available(iOS 17, *) {
                recognitionRequest.customizedLanguageModel = lmConfiguration
            }
        }
        
        // Create a recognition task for the speech recognition session.
        // Keep a reference to the task so that it can be canceled.
        // NOTE(review): this callback runs outside the actor yet mutates actor
        // state (audioEngine, recognitionRequest/Task); tolerated under Swift 5
        // but will be flagged under strict concurrency — revisit for Swift 6.
        recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { result, error in
            var isFinal = false
            
            if let result = result {
                // Call the text handler with the results
                print("SpeechRecognizer: \(result.bestTranscription.formattedString)")
                textHandler(result.bestTranscription.formattedString)
                isFinal = result.isFinal
            }
            
            if error != nil || isFinal {
                // Stop recognizing speech if there is a problem.
                self.audioEngine.stop()
                inputNode.removeTap(onBus: 0)
                self.recognitionRequest = nil
                self.recognitionTask = nil
            }
        }
        
        // Configure the microphone input.
        let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
            self.recognitionRequest?.append(buffer)
        }
        
        audioEngine.prepare()
        try audioEngine.start()

        // Let the user know to start talking.
        textHandler(String(localized: "dictate.msg.start"))
    }
    
    /// Tears down an in-progress session: stops the engine, finalizes the
    /// audio request, cancels the recognition task, and tells the user how to
    /// start a new dictation. No-op if the engine is not running.
    private func stop(textHandler: @escaping (String) -> Void) {
        if audioEngine.isRunning {
            audioEngine.stop()
            recognitionRequest?.endAudio()
            recognitionTask?.cancel()
            recognitionRequest = nil
            recognitionTask = nil
            textHandler(String(localized: "msg.press-green-button"))
            print("SpeechRecognizer: Audio engine stopped")
        }
    }
    
    /// Main-actor entry point for the UI: hops onto the actor and starts a
    /// dictation session. Errors thrown by `start` are silently discarded by
    /// the unstructured `Task` — NOTE(review): consider logging them.
    @MainActor func startListening(textHandler: @escaping (String) -> Void) {
        Task {
            print("SpeechRecognizer: Start listening")
            try await start(textHandler: textHandler)
        }
    }
    
    /// Main-actor entry point for the UI: hops onto the actor and stops the
    /// current dictation session, if any.
    @MainActor func stopListening(textHandler: @escaping (String) -> Void) {
        Task {
            print("SpeechRecognizer: Stop listening")
            await stop(textHandler: textHandler)
        }
    }
}


// MARK: Extensions

extension SFSpeechRecognizer {
    /// Async bridge over `requestAuthorization(_:)`: prompts the user for
    /// speech-recognition permission (or returns the cached decision) and
    /// reports `true` only when the status is `.authorized`.
    static func hasAuthorizationToRecognize() async -> Bool {
        await withCheckedContinuation { cont in
            requestAuthorization { authStatus in
                let granted = authStatus == .authorized
                cont.resume(returning: granted)
            }
        }
    }
}

extension AVAudioSession {
    /// Async bridge over `requestRecordPermission(_:)`: asks the user for
    /// microphone access (or returns the cached decision) and reports whether
    /// recording is permitted.
    func hasPermissionToRecord() async -> Bool {
        await withCheckedContinuation { cont in
            requestRecordPermission { granted in
                cont.resume(returning: granted)
            }
        }
    }
}
