#import "SpeechEngine.h"
#import <Speech/Speech.h>
#import <AVFoundation/AVFoundation.h>

@interface SpeechEngine () <SFSpeechRecognizerDelegate, AVSpeechSynthesizerDelegate>

// Recognizer is rebuilt per session so the locale can change between starts.
@property (nonatomic, strong) SFSpeechRecognizer *speechRecognizer;
@property (nonatomic, strong) SFSpeechAudioBufferRecognitionRequest *recognitionRequest;
@property (nonatomic, strong) SFSpeechRecognitionTask *recognitionTask;
@property (nonatomic, strong) AVAudioEngine *audioEngine;
@property (nonatomic, strong) AVSpeechSynthesizer *speechSynthesizer;
// Promise callbacks are blocks: declare them `copy` (not `strong`) so the
// block is moved off the stack and the ownership semantics are explicit.
@property (nonatomic, copy) RCTPromiseResolveBlock recognitionResolver;
@property (nonatomic, copy) RCTPromiseRejectBlock recognitionRejecter;

@end

@implementation SpeechEngine

RCT_EXPORT_MODULE()

// Creates the audio engine and speech synthesizer up front; the speech
// recognizer itself is constructed lazily per session (locale may vary).
- (instancetype)init
{
    if ((self = [super init])) {
        _audioEngine = [[AVAudioEngine alloc] init];
        _speechSynthesizer = [[AVSpeechSynthesizer alloc] init];
        _speechSynthesizer.delegate = self;
    }
    return self;
}

// Names of all events this module may emit to JavaScript.
- (NSArray<NSString *> *)supportedEvents
{
    NSArray<NSString *> *recognitionEvents = @[
        @"onSpeechRecognitionStart",
        @"onSpeechRecognitionEnd",
        @"onSpeechRecognitionResults",
        @"onSpeechRecognitionError",
    ];
    NSArray<NSString *> *synthesisEvents = @[
        @"onTextToSpeechStart",
        @"onTextToSpeechFinish",
        @"onTextToSpeechError",
    ];
    return [recognitionEvents arrayByAddingObjectsFromArray:synthesisEvents];
}

#pragma mark - Speech Recognition Methods

// Entry point from JS: requests speech-recognition authorization, then kicks
// off recognition for `locale`.  Rejects the promise on any denied status.
RCT_EXPORT_METHOD(startSpeechRecognition:(NSString *)locale
                  withResolver:(RCTPromiseResolveBlock)resolve
                  withRejecter:(RCTPromiseRejectBlock)reject)
{
    [SFSpeechRecognizer requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus status) {
        // Hop to the main queue before touching audio/engine state.
        dispatch_async(dispatch_get_main_queue(), ^{
            if (status == SFSpeechRecognizerAuthorizationStatusAuthorized) {
                [self startRecognitionWithLocale:locale resolver:resolve rejecter:reject];
            } else if (status == SFSpeechRecognizerAuthorizationStatusDenied) {
                reject(@"SPEECH_RECOGNITION_ERROR", @"Speech recognition authorization denied", nil);
            } else if (status == SFSpeechRecognizerAuthorizationStatusRestricted) {
                reject(@"SPEECH_RECOGNITION_ERROR", @"Speech recognition restricted", nil);
            } else {
                reject(@"SPEECH_RECOGNITION_ERROR", @"Speech recognition not determined", nil);
            }
        });
    }];
}

// Second startup stage: confirm microphone access, then configure the
// session/engine on the main queue.  Rejects if the permission is refused.
- (void)startRecognitionWithLocale:(NSString *)locale
                          resolver:(RCTPromiseResolveBlock)resolve
                          rejecter:(RCTPromiseRejectBlock)reject
{
    [[AVAudioSession sharedInstance] requestRecordPermission:^(BOOL granted) {
        if (granted) {
            dispatch_async(dispatch_get_main_queue(), ^{
                [self setupSpeechRecognitionWithLocale:locale resolver:resolve rejecter:reject];
            });
        } else {
            reject(@"SPEECH_RECOGNITION_ERROR", @"Microphone permission denied", nil);
        }
    }];
}

// Configures the audio session, installs a microphone tap, and starts the
// streaming recognition task.  Resolves the promise once recognition has
// started; subsequent results/errors are delivered via events.
- (void)setupSpeechRecognitionWithLocale:(NSString *)locale
                                resolver:(RCTPromiseResolveBlock)resolve
                                rejecter:(RCTPromiseRejectBlock)reject
{
    // Cancel any recognition already in flight.
    if (self.recognitionTask) {
        [self.recognitionTask cancel];
        self.recognitionTask = nil;
    }

    // A fresh recognizer per session lets the locale change between calls.
    NSLocale *nsLocale = [NSLocale localeWithLocaleIdentifier:locale];
    self.speechRecognizer = [[SFSpeechRecognizer alloc] initWithLocale:nsLocale];
    self.speechRecognizer.delegate = self;

    // initWithLocale: returns nil for unsupported locales; messaging nil for
    // isAvailable would yield NO anyway, but check explicitly for clarity.
    if (!self.speechRecognizer || !self.speechRecognizer.isAvailable) {
        reject(@"SPEECH_RECOGNITION_ERROR", @"Speech recognizer not available", nil);
        return;
    }

    // Configure the shared audio session for recording.  Check the BOOL
    // return value, not the error pointer — Cocoa only guarantees *error
    // is meaningful when the call reports failure.
    NSError *error = nil;
    AVAudioSession *audioSession = [AVAudioSession sharedInstance];
    BOOL ok = [audioSession setCategory:AVAudioSessionCategoryRecord
                                   mode:AVAudioSessionModeMeasurement
                                options:AVAudioSessionCategoryOptionDuckOthers
                                  error:&error];
    if (!ok) {
        reject(@"SPEECH_RECOGNITION_ERROR", error.localizedDescription, error);
        return;
    }

    ok = [audioSession setActive:YES
                     withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation
                           error:&error];
    if (!ok) {
        reject(@"SPEECH_RECOGNITION_ERROR", error.localizedDescription, error);
        return;
    }

    self.recognitionRequest = [[SFSpeechAudioBufferRecognitionRequest alloc] init];
    self.recognitionRequest.shouldReportPartialResults = YES;

    if (@available(iOS 13, *)) {
        self.recognitionRequest.requiresOnDeviceRecognition = NO;
    }

    // Stream microphone buffers into the recognition request.  Remove any
    // previously installed tap first: installing a second tap on the same
    // bus raises an exception, which crashed back-to-back start calls.
    AVAudioInputNode *inputNode = self.audioEngine.inputNode;
    AVAudioFormat *recordingFormat = [inputNode outputFormatForBus:0];
    [inputNode removeTapOnBus:0];

    // The engine retains the tap block and self retains the engine — use a
    // weak reference to break the retain cycle.
    __weak typeof(self) weakSelf = self;
    [inputNode installTapOnBus:0 bufferSize:1024 format:recordingFormat block:^(AVAudioPCMBuffer * _Nonnull buffer, AVAudioTime * _Nonnull when) {
        [weakSelf.recognitionRequest appendAudioPCMBuffer:buffer];
    }];

    [self.audioEngine prepare];
    if (![self.audioEngine startAndReturnError:&error]) {
        [inputNode removeTapOnBus:0];
        reject(@"SPEECH_RECOGNITION_ERROR", error.localizedDescription, error);
        return;
    }

    self.recognitionTask = [self.speechRecognizer recognitionTaskWithRequest:self.recognitionRequest resultHandler:^(SFSpeechRecognitionResult * _Nullable result, NSError * _Nullable taskError) {
        __strong typeof(weakSelf) strongSelf = weakSelf;
        if (!strongSelf) {
            return;
        }

        if (taskError) {
            [strongSelf sendEventWithName:@"onSpeechRecognitionError" body:@{
                @"code": @(taskError.code),
                @"message": taskError.localizedDescription ?: @""
            }];
            return;
        }

        if (result) {
            NSMutableArray *results = [[NSMutableArray alloc] init];
            for (SFTranscription *transcription in result.transcriptions) {
                // Report each candidate with the confidence of its own first
                // segment.  (The previous code reused the best transcription's
                // confidence for every candidate, mislabeling alternatives.)
                [results addObject:@{
                    @"transcript": transcription.formattedString,
                    @"confidence": @(transcription.segments.firstObject.confidence),
                    @"isFinal": @(result.isFinal)
                }];
            }
            [strongSelf sendEventWithName:@"onSpeechRecognitionResults" body:results];
        }
    }];

    [self sendEventWithName:@"onSpeechRecognitionStart" body:nil];

    // The promise is settled right here, so the resolver/rejecter must NOT be
    // stashed for later use: rejecting an already-resolved promise (as the
    // previous code allowed via availabilityDidChange) is a double-settle
    // error in React Native.
    resolve(nil);
}

// Stops streaming audio and lets the recognizer finish processing what it has
// already received.  Safe to call when recognition is not running.
RCT_EXPORT_METHOD(stopSpeechRecognition:(RCTPromiseResolveBlock)resolve
                  withRejecter:(RCTPromiseRejectBlock)reject)
{
    if (self.audioEngine.isRunning) {
        [self.audioEngine stop];
        [self.recognitionRequest endAudio];
    }

    // Remove the microphone tap so the next start can install a fresh one
    // (a second installTapOnBus: on the same bus raises an exception).
    [self.audioEngine.inputNode removeTapOnBus:0];

    if (self.recognitionTask) {
        [self.recognitionTask finish];
        self.recognitionTask = nil;
    }
    // Drop the request so stale buffers cannot be appended after stop.
    self.recognitionRequest = nil;

    [self sendEventWithName:@"onSpeechRecognitionEnd" body:nil];
    resolve(@"Speech recognition stopped");
}

// Cancels recognition immediately, discarding any pending results.
RCT_EXPORT_METHOD(cancelSpeechRecognition:(RCTPromiseResolveBlock)resolve
                  withRejecter:(RCTPromiseRejectBlock)reject)
{
    if (self.audioEngine.isRunning) {
        [self.audioEngine stop];
    }

    // Tear down the tap so a subsequent start can re-install it safely
    // (installing a second tap on the same bus raises an exception).
    [self.audioEngine.inputNode removeTapOnBus:0];

    if (self.recognitionTask) {
        [self.recognitionTask cancel];
        self.recognitionTask = nil;
    }
    self.recognitionRequest = nil;

    resolve(nil);
}

// Resolves YES when the Speech framework is present (it may be weak-linked)
// and at least one recognition locale is supported on this device.
RCT_EXPORT_METHOD(isSpeechRecognitionAvailable:(RCTPromiseResolveBlock)resolve
                  withRejecter:(RCTPromiseRejectBlock)reject)
{
    BOOL frameworkPresent = ([SFSpeechRecognizer class] != nil);
    BOOL hasLocales = (SFSpeechRecognizer.supportedLocales.count > 0);
    resolve(@(frameworkPresent && hasLocales));
}

#pragma mark - Text to Speech Methods

// Speaks `text` through AVSpeechSynthesizer.  Recognized option keys:
//   language (BCP-47 string, default "en-US"), pitch, rate, volume.
// Any utterance already in progress is stopped first.
RCT_EXPORT_METHOD(speak:(NSString *)text
                  withOptions:(NSDictionary *)options
                  withResolver:(RCTPromiseResolveBlock)resolve
                  withRejecter:(RCTPromiseRejectBlock)reject)
{
    if (self.speechSynthesizer.isSpeaking) {
        [self.speechSynthesizer stopSpeakingAtBoundary:AVSpeechBoundaryImmediate];
    }

    AVSpeechUtterance *utterance = [AVSpeechUtterance speechUtteranceWithString:text];

    // A nil voice (unknown language identifier) falls back to the default voice.
    NSString *languageCode = options[@"language"] ?: @"en-US";
    utterance.voice = [AVSpeechSynthesisVoice voiceWithLanguage:languageCode];

    id pitchValue = options[@"pitch"];
    if (pitchValue) {
        utterance.pitchMultiplier = [pitchValue floatValue];
    }

    id rateValue = options[@"rate"];
    if (rateValue) {
        // JS supplies a multiplier relative to the platform's default rate.
        utterance.rate = [rateValue floatValue] * AVSpeechUtteranceDefaultSpeechRate;
    }

    id volumeValue = options[@"volume"];
    if (volumeValue) {
        utterance.volume = [volumeValue floatValue];
    }

    [self.speechSynthesizer speakUtterance:utterance];
    resolve(nil);
}

// Halts any in-progress synthesis immediately (no boundary wait).
RCT_EXPORT_METHOD(stop:(RCTPromiseResolveBlock)resolve
                  withRejecter:(RCTPromiseRejectBlock)reject)
{
    AVSpeechSynthesizer *synthesizer = self.speechSynthesizer;
    [synthesizer stopSpeakingAtBoundary:AVSpeechBoundaryImmediate];
    resolve(nil);
}

// Pauses synthesis immediately; resume continues from the pause point.
RCT_EXPORT_METHOD(pause:(RCTPromiseResolveBlock)resolve
                  withRejecter:(RCTPromiseRejectBlock)reject)
{
    AVSpeechSynthesizer *synthesizer = self.speechSynthesizer;
    [synthesizer pauseSpeakingAtBoundary:AVSpeechBoundaryImmediate];
    resolve(nil);
}

// Resumes a previously paused utterance (no-op when nothing is paused).
RCT_EXPORT_METHOD(resume:(RCTPromiseResolveBlock)resolve
                  withRejecter:(RCTPromiseRejectBlock)reject)
{
    AVSpeechSynthesizer *synthesizer = self.speechSynthesizer;
    [synthesizer continueSpeaking];
    resolve(nil);
}

// Resolves with whether the synthesizer is currently producing speech.
RCT_EXPORT_METHOD(isSpeaking:(RCTPromiseResolveBlock)resolve
                  withRejecter:(RCTPromiseRejectBlock)reject)
{
    resolve(@(self.speechSynthesizer.isSpeaking));
}

#pragma mark - AVSpeechSynthesizerDelegate

// AVSpeechSynthesizerDelegate: an utterance began — notify JS.
- (void)speechSynthesizer:(AVSpeechSynthesizer *)synthesizer didStartSpeechUtterance:(AVSpeechUtterance *)utterance
{
    [self sendEventWithName:@"onTextToSpeechStart" body:nil];
}

// AVSpeechSynthesizerDelegate: an utterance completed normally — notify JS.
- (void)speechSynthesizer:(AVSpeechSynthesizer *)synthesizer didFinishSpeechUtterance:(AVSpeechUtterance *)utterance
{
    [self sendEventWithName:@"onTextToSpeechFinish" body:nil];
}

// AVSpeechSynthesizerDelegate: an utterance was cancelled (e.g. via stop).
// Cancellation is deliberately reported to JS as a "finish" so callers see a
// single terminal event for every utterance.
- (void)speechSynthesizer:(AVSpeechSynthesizer *)synthesizer didCancelSpeechUtterance:(AVSpeechUtterance *)utterance
{
    [self sendEventWithName:@"onTextToSpeechFinish" body:nil];
}

#pragma mark - SFSpeechRecognizerDelegate

// SFSpeechRecognizerDelegate: recognizer availability flipped (e.g. network
// loss for server-based recognition).
- (void)speechRecognizer:(SFSpeechRecognizer *)speechRecognizer availabilityDidChange:(BOOL)available
{
    if (available) {
        return;
    }

    // Previously only a pending start promise was notified; JS event
    // listeners heard nothing once recognition was already running.  Emit the
    // declared error event so active sessions learn about the outage too.
    [self sendEventWithName:@"onSpeechRecognitionError" body:@{
        @"message": @"Speech recognizer became unavailable"
    }];

    if (self.recognitionRejecter) {
        self.recognitionRejecter(@"SPEECH_RECOGNITION_ERROR", @"Speech recognizer became unavailable", nil);
        self.recognitionRejecter = nil;
        self.recognitionResolver = nil;
    }
}

#pragma mark - New Architecture Support

#ifdef RCT_NEW_ARCH_ENABLED
// New-architecture (TurboModule) hook: returns the codegen'd C++ spec wrapper
// for this module.  Compiled only when the app enables the new architecture.
- (std::shared_ptr<facebook::react::TurboModule>)getTurboModule:
    (const facebook::react::ObjCTurboModule::InitParams &)params
{
    return std::make_shared<facebook::react::NativeSpeechEngineSpecJSI>(params);
}
#endif

// Required for RN built in Event Emitter Calls
// Intentionally empty: RCTEventEmitter's JS side calls addListener on every
// NativeEventEmitter subscription; without this export RN logs a warning.
RCT_EXPORT_METHOD(addListener:(NSString *)eventName)
{
    // Keep: Required for RN built in Event Emitter Calls.
}

// Intentionally empty counterpart to addListener: called when JS removes
// event subscriptions.  `count` is unused here.
RCT_EXPORT_METHOD(removeListeners:(double)count)
{
    // Keep: Required for RN built in Event Emitter Calls.
}

@end
