/**
 * https://help.aliyun.com/zh/isi/developer-reference/websocket?spm=a2c4g.11186623.help-menu-30413.d_3_2_1_8.694e8d64PgomCT
 */

import { utc } from '@date-fns/utc'
import { merge } from '@moeru/std'
import { isBefore } from 'date-fns'
import { customAlphabet } from 'nanoid'

import { createToken } from './token'
import { nlsWebSocketEndpointFromRegion } from './utils'

// NOTICE: Aliyun NLS requires exact 32 character length in hex for IDs,
// so we use a custom nanoid alphabet and length here.
// Used to generate `message_id` and the default `task_id`/session ID below.
const nanoid = customAlphabet('0123456789abcdef', 32)

/**
 * Header carried by every NLS WebSocket message, both client- and
 * server-side. `N` is the literal event name carried in `name`.
 */
interface BaseEventHeader<N> {
  /** App key of the NLS project (issued by the Aliyun console). */
  appkey: string
  /** Unique ID for this message — 32-char hex, see `nanoid` above. */
  message_id: string
  /** ID tying all messages of one transcription task together. */
  task_id: string
  /** Only the real-time transcription namespace is modeled here. */
  namespace: 'SpeechTranscriber'
  /** Event name, e.g. 'StartTranscription'. */
  name: N

  /** Status code on server responses; only the success code is modeled. */
  status?: 20000000
  /** Human-readable status text; values as observed from the service. */
  status_message?: 'GATEWAY' | 'SUCCESS' | 'Success'
}

/** Generic message envelope: a typed header plus an event-specific payload. */
interface BaseEvent<N, P> {
  header: BaseEventHeader<N>
  payload: P
}

/**
 * Client→server: begin a real-time transcription task. All payload fields
 * are optional tuning parameters; see the Aliyun NLS WebSocket reference
 * linked at the top of this file.
 */
export interface EventStartTranscription extends BaseEvent<'StartTranscription', {
  /** Audio format, supports PCM, WAV, OPUS, SPEEX, AMR, MP3, AAC. */
  format?: 'pcm' | 'wav' | 'opus' | 'speex' | 'amr' | 'mp3' | 'aac'
  /** Audio sample rate in Hz. Default is 16000. Make sure the project is configured for the selected rate. */
  sample_rate?: 8000 | 16000
  /** Return interim recognition results. Disabled by default. */
  enable_intermediate_result?: boolean
  /** Add punctuation during post-processing. Disabled by default. */
  enable_punctuation_prediction?: boolean
  /** Inverse text normalization (ITN). Converts Chinese numerals to Arabic digits when true. Disabled by default. */
  enable_inverse_text_normalization?: boolean
  /** Custom model ID. */
  customization_id?: string
  /** Custom vocabulary ID. */
  vocabulary_id?: string
  /** Silence threshold in milliseconds for sentence segmentation. Range 200–2000 ms, default 800 ms. */
  max_sentence_silence?: number
  /** Return word-level timestamps. Disabled by default. */
  enable_words?: boolean
  /** Filter filler words (disfluency). Requires version 4.0. Disabled by default. */
  disfluency?: boolean
  /**
   * Noise threshold. Range [-1, 1].
   * Closer to -1 treats noise as speech more aggressively.
   * Closer to +1 treats speech as noise more aggressively.
   * Important: this is an advanced parameter and should be tuned carefully with dedicated testing.
   */
  speech_noise_threshold?: number
  /** Enable semantic sentence segmentation. Disabled by default. */
  enable_semantic_sentence_detection?: boolean
}> {}

export interface EventStopTranscription extends BaseEvent<'StopTranscription', undefined> {}

/** Server→client: the service accepted `StartTranscription` and is ready for audio. */
export interface EventTranscriptionStarted extends BaseEvent<'TranscriptionStarted', {
  /** Session ID. Returned as-is if provided by the client, otherwise generated by the server. */
  session_id: string
}> {}

/** Server→client: the service detected the start of a new sentence. */
export interface EventSentenceBegin extends BaseEvent<'SentenceBegin', {
  /** Incremental sentence index starting from 1. */
  index: number
  /** Sentence start time relative to the audio stream start, in milliseconds. */
  time: number
}> {}

/**
 * Server→client: interim recognition result for the sentence in progress
 * (sent when interim results are enabled — see `enable_intermediate_result`).
 */
export interface EventTranscriptionResultChanged extends BaseEvent<'TranscriptionResultChanged', {
  /** Incremental sentence index starting from 1. */
  index: number
  /** Audio duration processed so far, in milliseconds. */
  time: number
  /** Current recognition text. */
  result: string
  /** Word-level alignment information. */
  words?: {
    text: string
    startTime: number
    endTime: number
  }[]
  /** Status code returned by the service. */
  status: number
}> {}

/**
 * Server→client: a sentence finished; `result` is the final text for the
 * sentence opened by the matching `SentenceBegin` event.
 */
export interface EventSentenceEnd extends BaseEvent<'SentenceEnd', {
  /** Incremental sentence index starting from 1. */
  index: number
  /** Audio duration processed so far, in milliseconds. */
  time: number
  /** Start time of the matching `SentenceBegin` event, in milliseconds. */
  begin_time: number
  /** Final recognition text for the sentence. */
  result: string
  /** Confidence score in the range [0.0, 1.0]. */
  confidence: number
  /** Word-level alignment information. */
  words?: {
    /** Word text */
    text: string
    /** Word start time */
    startTime: number
    /** Word end time */
    endTime: number
  }[]
  /** Status code returned by the service. */
  status: number
  /** Buffered result when semantic segmentation is enabled; contains the upcoming unfinished sentence. */
  stash_result: {
    /** Incremental sentence index starting from 1. */
    sentenceId: number
    /** Sentence start time. */
    beginTime: number
    /** Recognized text. */
    text: string
    /** Current processing time. */
    currentTime: number
  }
}> {}

export interface EventTranscriptionCompleted extends BaseEvent<'TranscriptionCompleted', undefined> {}

/** Map from client→server event name to its payload type. */
export interface ClientEvents {
  StartTranscription: EventStartTranscription['payload']
  StopTranscription: EventStopTranscription['payload']
}

/** Union of all client→server messages, discriminated by `header.name`. */
export type ClientEvent = {
  [K in keyof ClientEvents]: BaseEvent<K, ClientEvents[K]>;
}[keyof ClientEvents]

/** Map from server→client event name to its payload type. */
export interface ServerEvents {
  TranscriptionStarted: EventTranscriptionStarted['payload']
  SentenceBegin: EventSentenceBegin['payload']
  TranscriptionResultChanged: EventTranscriptionResultChanged['payload']
  SentenceEnd: EventSentenceEnd['payload']
  TranscriptionCompleted: EventTranscriptionCompleted['payload']
}

/** Union of all server→client messages, discriminated by `header.name`. */
export type ServerEvent = {
  [K in keyof ServerEvents]: BaseEvent<K, ServerEvents[K]>;
}[keyof ServerEvents]

/**
 * Creates a transcription session on top of {@link createAliyunNLSProvider}.
 *
 * A session owns a generated 32-char hex session ID (used as the NLS
 * `task_id`) and exposes `start`/`stop` helpers that send the corresponding
 * commands over a caller-supplied WebSocket, plus `onEvent` to route incoming
 * messages to a typed callback.
 *
 * @param accessKeyId Aliyun access key ID
 * @param accessKeySecret Aliyun access key secret
 * @param appKey NLS project app key
 * @param options optional region selection (defaults to 'cn-shanghai' in the provider)
 */
export function createAliyunNLSSession(
  accessKeyId: string,
  accessKeySecret: string,
  appKey: string,
  options?: {
    region?:
      | 'cn-shanghai'
      | 'cn-shanghai-internal'
      | 'cn-beijing'
      | 'cn-beijing-internal'
      | 'cn-shenzhen'
      | 'cn-shenzhen-internal'
  },
) {
  const provider = createAliyunNLSProvider(accessKeyId, accessKeySecret, appKey, options)
  const providerSessionId = nanoid()

  /**
   * Sends `StartTranscription` over an open WebSocket.
   *
   * @param websocketConn open connection to the NLS endpoint
   * @param options optional session ID override plus any `StartTranscription`
   *   payload fields (format, sample_rate, enable_intermediate_result, …)
   */
  function start(websocketConn: WebSocket, options?: {
    sessionId?: string
  } & EventStartTranscription['payload']) {
    const mergedOptions = merge({ sessionId: providerSessionId }, options)
    // BUGFIX: caller-supplied payload options were previously dropped and the
    // payload was hard-coded to { format: 'wav' }. Forward them, keeping
    // 'wav' as the default format for backward compatibility.
    const { sessionId, ...payloadOptions } = mergedOptions

    websocketConn.send(JSON.stringify({
      header: {
        appkey: provider.appKey,
        message_id: nanoid(),
        task_id: sessionId,
        namespace: 'SpeechTranscriber',
        name: 'StartTranscription',
      },
      payload: {
        format: 'wav',
        ...payloadOptions,
      },
    } satisfies EventStartTranscription))
  }

  /**
   * Sends `StopTranscription` over an open WebSocket. A no-op when no
   * connection is supplied.
   *
   * @param websocketConn open connection to the NLS endpoint, if any
   * @param options optional session ID override
   */
  function stop(websocketConn?: WebSocket, options?: {
    sessionId?: string
  }) {
    const mergedOptions = merge({ sessionId: providerSessionId }, options)

    websocketConn?.send(JSON.stringify({
      header: {
        appkey: provider.appKey,
        message_id: nanoid(),
        task_id: mergedOptions.sessionId,
        namespace: 'SpeechTranscriber',
        name: 'StopTranscription',
      },
      payload: undefined,
    } satisfies EventStopTranscription))
  }

  /**
   * Normalizes a raw WebSocket message into a typed server event and passes
   * it to `cb`. Text frames arrive as JSON strings and are parsed first.
   *
   * NOTE(review): no runtime validation is performed beyond JSON parsing —
   * the shape is assumed to match {@link ServerEvent}.
   */
  function onEvent(data: unknown, cb: (event: ServerEvent) => void) {
    // Previously a string payload was blind-cast to ServerEvent, which made
    // every property access on the "event" fail; parse it instead.
    const event = (typeof data === 'string' ? JSON.parse(data) : data) as ServerEvent
    cb(event)
  }

  return {
    ...provider,

    sessionId: providerSessionId,
    start,
    stop,
    onEvent,
  }
}

/**
 * Creates a lightweight NLS provider that lazily obtains — and caches — an
 * access token, and builds the region-specific WebSocket endpoint URL.
 *
 * The token is fetched on first use and fetched again once the recorded
 * expiry time has passed.
 *
 * @param accessKeyId Aliyun access key ID
 * @param accessKeySecret Aliyun access key secret
 * @param appKey NLS project app key, returned as-is for callers
 * @param options optional region selection; defaults to 'cn-shanghai'
 */
export function createAliyunNLSProvider(
  accessKeyId: string,
  accessKeySecret: string,
  appKey: string,
  options?: {
    region?:
      | 'cn-shanghai'
      | 'cn-shanghai-internal'
      | 'cn-beijing'
      | 'cn-beijing-internal'
      | 'cn-shenzhen'
      | 'cn-shenzhen-internal'
  },
) {
  let cachedToken = ''
  let cachedTokenExpiresAt = utc(new Date()).getTime()

  /** Returns the cached token, refreshing it when missing or expired. */
  async function ensureToken(): Promise<string> {
    const expired = isBefore(new Date(cachedTokenExpiresAt), utc(new Date()))
    if (cachedToken !== '' && !expired)
      return cachedToken

    const created = await createToken(accessKeyId, accessKeySecret, { regionId: options?.region ?? 'cn-shanghai' })
    cachedToken = created.token
    cachedTokenExpiresAt = created.expiresAt
    return cachedToken
  }

  /** Builds the WebSocket URL carrying a valid `token` query parameter. */
  async function websocketUrl() {
    const token = await ensureToken()
    const endpoint = nlsWebSocketEndpointFromRegion(options?.region ?? 'cn-shanghai')
    endpoint.searchParams.set('token', token)
    return endpoint.toString()
  }

  return {
    websocketUrl,
    appKey,
  }
}
