﻿using System;
using System.Runtime.InteropServices;

namespace WhisperWrapper
{
    /// <summary>
    /// Decoding strategy selector passed to the native library via
    /// <c>WhisperFullParams.strategy</c>. The implicit values (GREEDY = 0,
    /// BEAM_SEARCH = 1) are expected to mirror the native
    /// <c>whisper_sampling_strategy</c> enum — confirm against whisper.h.
    /// </summary>
    public enum WhisperSamplingStrategy
    {
        WHISPER_SAMPLING_GREEDY,      // similar to OpenAI's GreedyDecoder
        WHISPER_SAMPLING_BEAM_SEARCH, // similar to OpenAI's BeamSearchDecoder
    }

    /// <summary>
    /// Parameters for the greedy sampling strategy. Sequential layout:
    /// field order and types must match the corresponding native struct
    /// in whisper.h exactly.
    /// </summary>
    [StructLayout(LayoutKind.Sequential)]
    public struct WhisperParamGreedy
    {
        // Number of candidates to sample when temperature > 0.
        public int best_of;    // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/transcribe.py#L264
    }

    /// <summary>
    /// Parameters for the beam-search sampling strategy. Sequential layout:
    /// field order and types must match the corresponding native struct
    /// in whisper.h exactly.
    /// </summary>
    [StructLayout(LayoutKind.Sequential)]
    public struct WhisperParamBeamSearch
    {
        // Number of beams kept during decoding.
        public int beam_size;  // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/transcribe.py#L265
        public float patience; // TODO: not implemented, ref: https://arxiv.org/pdf/2204.05424.pdf
    }

    /// <summary>
    /// Called by native whisper.cpp for every newly generated text segment.
    /// <paramref name="ctx"/> and <paramref name="state"/> are opaque native
    /// pointers (whisper_context / whisper_state); <paramref name="n_new"/> is
    /// the number of segments added since the previous call; <paramref name="user_data"/>
    /// is the pointer supplied in <c>WhisperFullParams.new_segment_callback_user_data</c>.
    /// Keep a managed reference to the delegate alive while the native call is
    /// in flight, or the GC may collect the marshaled thunk.
    /// </summary>
    [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
    public delegate void WhisperNewSegmentCallback(IntPtr ctx, IntPtr state, int n_new, IntPtr user_data);
    /// <summary>
    /// Called by native whisper.cpp on each progress update.
    /// <paramref name="progress"/> is the progress value reported by the native
    /// side (presumably a 0-100 percentage — confirm against whisper.h);
    /// <paramref name="user_data"/> is the pointer supplied in
    /// <c>WhisperFullParams.progress_callback_user_data</c>.
    /// </summary>
    [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
    public delegate void WhisperProgressCallback(IntPtr ctx, IntPtr state, int progress, IntPtr user_data);
    /// <summary>
    /// Called by native whisper.cpp each time before the encoder starts;
    /// per whisper.h, returning <c>false</c> aborts further processing.
    /// </summary>
    /// <remarks>
    /// The native callback returns a one-byte C++ <c>bool</c>. Without an
    /// explicit marshaling attribute the CLR marshals a managed <c>bool</c>
    /// return as a 4-byte Win32 BOOL, which does not match the native
    /// signature; <see cref="UnmanagedType.I1"/> pins the return to one byte.
    /// </remarks>
    [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
    [return: MarshalAs(UnmanagedType.I1)]
    public delegate bool WhisperEncoderBeginCallback(IntPtr ctx, IntPtr state, IntPtr user_data);
    /// <summary>
    /// Called by each decoder to filter obtained logits.
    /// <paramref name="tokens"/> points to the current token sequence of length
    /// <paramref name="n_tokens"/>; <paramref name="logits"/> points to the
    /// native logits buffer the callback may modify in place (buffer length is
    /// not carried in this signature — it is the model vocabulary size on the
    /// native side; confirm against whisper.h).
    /// </summary>
    [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
    public delegate void WhisperLogitsFilterCallback(
        IntPtr ctx,
        IntPtr state,
        IntPtr tokens,
        int n_tokens,
        IntPtr logits,
        IntPtr user_data
    );

    /// <summary>
    /// Managed mirror of the native <c>whisper_full_params</c> struct passed to
    /// <c>whisper_full()</c>. Field order and field types must match the native
    /// header byte-for-byte (sequential layout); <c>byte</c> fields stand in for
    /// one-byte C++ <c>bool</c>s (0 = false, non-zero = true), and <c>IntPtr</c>
    /// fields carry native pointers (strings, token arrays, function pointers).
    /// NOTE(review): this layout tracks a specific whisper.h revision and the
    /// native struct changes between whisper.cpp versions — verify against the
    /// exact header the loaded native library was built from before use.
    /// </summary>
    [StructLayout(LayoutKind.Sequential)]
    public struct WhisperFullParams
    {
        public WhisperSamplingStrategy strategy;  // greedy vs. beam search (marshals as a 4-byte int)

        public int n_threads;
        public int n_max_text_ctx;     // max tokens to use from past text as prompt for the decoder
        public int offset_ms;          // start offset in ms
        public int duration_ms;        // audio duration to process in ms

        public byte translate;         // translate the output (per whisper.h; target language is English)
        public byte no_context;        // do not use past transcription (if any) as initial prompt for the decoder
        public byte single_segment;    // force single segment output (useful for streaming)
        public byte print_special;     // print special tokens (e.g. <SOT>, <EOT>, <BEG>, etc.)
        public byte print_progress;    // print progress information
        public byte print_realtime;    // print results from within whisper.cpp (avoid it, use callback instead)
        public byte print_timestamps;  // print timestamps for each text segment when printing realtime

        // [EXPERIMENTAL] token-level timestamps
        public byte token_timestamps;  // enable token-level timestamps
        public float thold_pt;         // timestamp token probability threshold (~0.01)
        public float thold_ptsum;      // timestamp token sum probability threshold (~0.01)
        public int max_len;            // max segment length in characters
        public byte split_on_word;     // split on word rather than on token (when used with max_len)
        public int max_tokens;         // max tokens per segment (0 = no limit)

        // [EXPERIMENTAL] speed-up techniques
        // note: these can significantly reduce the quality of the output
        public byte speed_up;          // speed-up the audio by 2x using Phase Vocoder
        public int audio_ctx;          // overwrite the audio context size (0 = use default)

        // [EXPERIMENTAL] [TDRZ] tinydiarize
        public byte tdrz_enable;       // enable tinydiarize speaker turn detection

        // tokens to provide to the whisper decoder as initial prompt
        // these are prepended to any existing text context from a previous call
        public IntPtr initial_prompt;  // native char* (null-terminated UTF-8 string — confirm encoding against whisper.h)
        public IntPtr prompt_tokens;   // native whisper_token* array of length prompt_n_tokens
        public int prompt_n_tokens;

        // for auto-detection, set to nullptr, "" or "auto"
        public IntPtr language;        // native char* language code (e.g. "en")
        public byte detect_language;

        // common decoding parameters:
        public byte suppress_blank;    // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/decoding.py#L89
        public byte suppress_non_speech_tokens; // ref: https://github.com/openai/whisper/blob/7858aa9c08d98f75575035ecd6481f462d66ca27/whisper/tokenizer.py#L224-L253

        public float temperature;      // initial decoding temperature, ref: https://ai.stackexchange.com/a/32478
        public float max_initial_ts;   // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/decoding.py#L97
        public float length_penalty;   // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/transcribe.py#L267

        // fallback parameters
        // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/transcribe.py#L274-L278
        public float temperature_inc;
        public float entropy_thold;    // similar to OpenAI's "compression_ratio_threshold"
        public float logprob_thold;
        public float no_speech_thold;  // TODO: not implemented

        // strategy-specific parameters; only the one matching `strategy` is consulted
        public WhisperParamGreedy greedy;
        public WhisperParamBeamSearch beam_search;

        // called for every newly generated text segment
        // NOTE(review): these IntPtrs are function pointers obtained via
        // Marshal.GetFunctionPointerForDelegate — the caller must keep the
        // source delegate alive for the duration of the native call.
        public IntPtr new_segment_callback;
        public IntPtr new_segment_callback_user_data;

        // called on each progress update
        public IntPtr progress_callback;
        public IntPtr progress_callback_user_data;

        // called each time before the encoder starts
        public IntPtr encoder_begin_callback;
        public IntPtr encoder_begin_callback_user_data;

        // called by each decoder to filter obtained logits
        public IntPtr logits_filter_callback;
        public IntPtr logits_filter_callback_user_data;
    }
}
