#!/usr/bin/env python3
# Author: Armit
# Create Time: 周六 2025/08/23

import torch
from pathlib import Path
from datetime import datetime
from typing import Tuple
from transformers import WhisperProcessor, WhisperFeatureExtractor, WhisperTokenizer, WhisperForConditionalGeneration

# Prefer GPU when available; modules in this project place models on this device.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

def now_datetime_str() -> str:
  """Return the current timestamp as a filesystem-safe string.

  Equivalent to ``str(datetime.now())`` with '-' and ':' removed and the
  space replaced by '_', e.g. '20250823_143000.123456'.
  (Was a lambda assigned to a name — PEP 8 E731.)
  """
  return str(datetime.now()).replace('-', '').replace(' ', '_').replace(':', '')

# Project directory layout, all relative to this file.
# NOTE: output directories are created eagerly as an import side effect.
BASE_PATH = Path(__file__).parent
DATA_PATH = BASE_PATH / 'data' / 'testset'
PREPROCESSED_PATH = BASE_PATH / 'data_preprocessed'
PREPROCESSED_PATH.mkdir(exist_ok=True)
LOG_PATH = BASE_PATH / 'log'
LOG_PATH.mkdir(exist_ok=True)
SUBMIT_PATH = BASE_PATH / 'submit'
SUBMIT_PATH.mkdir(exist_ok=True)

# Audio preprocessing constants
SAMPLE_RATE = 16000     # Hz
TARGET_LOUNDNESS = -20  # dB  # NOTE(review): name has a typo ("LOUDNESS") — kept as-is for existing callers

# Whisper model size suffixes (expanded to 'openai/whisper-<size>' by get_whisper*)
MODELS = [
  'tiny',
  'base',
  'large-v3-turbo',
]
LANGUAGES = [   # known languages by WhisperTokenizer
  'afrikaans', 'albanian', 'amharic', 'arabic', 'armenian', 'assamese', 'azerbaijani',
  'bashkir', 'basque', 'belarusian', 'bengali', 'bosnian', 'breton', 'bulgarian', 'burmese',
  'cantonese', 'castilian', 'catalan', 'chinese', 'croatian', 'czech',
  'danish', 'dutch',
  'english', 'estonian',
  'faroese', 'finnish', 'flemish', 'french',
  'galician', 'georgian', 'german', 'greek', 'gujarati',
  'haitian', 'haitian creole', 'hausa', 'hawaiian', 'hebrew', 'hindi', 'hungarian',
  'icelandic', 'indonesian', 'italian',
  'japanese', 'javanese',
  'kannada', 'kazakh', 'khmer', 'korean',
  'lao', 'latin', 'latvian', 'letzeburgesch', 'lingala', 'lithuanian', 'luxembourgish',
  'macedonian', 'malagasy', 'malay', 'malayalam', 'maltese', 'mandarin', 'maori', 'marathi', 'moldavian', 'moldovan', 'mongolian', 'myanmar',
  'nepali', 'norwegian', 'nynorsk',
  'occitan',
  'panjabi', 'pashto', 'persian', 'polish', 'portuguese', 'punjabi', 'pushto',
  'romanian', 'russian',
  'sanskrit', 'serbian', 'shona', 'sindhi', 'sinhala', 'sinhalese', 'slovak', 'slovenian', 'somali', 'spanish', 'sundanese', 'swahili', 'swedish',
  'tagalog', 'tajik', 'tamil', 'tatar', 'telugu', 'thai', 'tibetan', 'turkish', 'turkmen',
  'ukrainian', 'urdu', 'uzbek',
  'valencian', 'vietnamese',
  'welsh',
  'yiddish', 'yoruba',
]
# Task lang ids ('<iso639-1>_<region>') -> Whisper language names
LANG_TO_LANGUAGE = {
  'is_is': 'icelandic',
  'sw_ke': 'swahili',
  'am_et': 'amharic',
  'as_in': 'assamese',
  'yo_ng': 'yoruba',
  'ne_np': 'nepali',
  'lb_lu': 'luxembourgish',
  'ha_ng': 'hausa',
  'ln_cd': 'lingala',
  'zu_za': 'zulu',     # missing lang token in the Whisper vocab
  'sd_in': 'sindhi',
  'mt_mt': 'maltese',
  'et_ee': 'estonian',
  'ky_kg': 'kyrgyz',    # missing lang token in the Whisper vocab
  'pa_in': 'punjabi',
  'lt_lt': 'lithuanian',
  'be_by': 'belarusian',
  'cy_gb': 'welsh',
  'so_so': 'somali',
}
LANGUAGE_TO_LANG = {v: k for k, v in LANG_TO_LANGUAGE.items()}                    # inverse lookup: language name -> task lang id
LANG_TOKEN_TO_LANGUAGE = {k.split('_')[0]: v for k, v in LANG_TO_LANGUAGE.items()}  # e.g. 'is' -> 'icelandic'
# Remap of predicted lang tokens onto the task's label set
# (presumably derived from confusion/error analysis — TODO confirm with owner)
LANG_TOKEN_FIX_MAP = {
  # remaps for the fine-tuned model
  'kk': 'ky',
  'bo': 'zu',
  # remaps for the pretrained model
  'sv': 'is',
  'no': 'is',
  'nn': 'is',
  'ht': 'yo',
  'fi': 'et',
  'en': 'cy',
  'hy': 'am',
  'ar': 'so',
  'hi': 'pa',
  'bn': 'as',
  'la': 'mt',
  'it': 'mt',
  'ro': 'mt',
  'de': 'lb',
  'tr': 'ky',
  'ru': 'be',
  'uk': 'be',
  'ur': 'sd',
  'fr': 'ln',
  'pt': 'ln',
  'ms': 'ha',
  'az': 'ky',
}

# Module names LoRA adapters are injected into (attention + MLP projections)
LORA_TARGET_MODULES = ['k_proj', 'q_proj', 'v_proj', 'out_proj', 'fc1', 'fc2']

# Quick reference for Whisper tokenizer special-token ids
TOKENIZER_VOCAB_MEMO = {
  50257: '<|endoftext|>',
  50258: '<|startoftranscript|>',
  # 99 language-code tokens in between, from <|en|> (50259) up to <|su|> (50357) — TODO confirm the last one
  50358: '<|translate|>',
  50359: '<|transcribe|>',
  50360: '<|startoflm|>',
  50361: '<|startofprev|>',
  50362: '<|nocaptions|>',
  50363: '<|notimestamps|>',
  # 1501 timestamp tokens in between
  # would-be ids for the extra lang tokens (both written as 51865 in the original notes;
  # if both were actually added, the second would land at 51866):
  #51865: '<|zu|>',
  #51865: '<|ky|>',
}
# Quick reference for Whisper decoder prompt patterns
#  [50258, 50363, ..., 50257] = <|startoftranscript|><|notimestamps|>...<|endoftext|>
#  [50258, 50359, 50363, ..., 50257] = <|startoftranscript|><|transcribe|><|notimestamps|>...<|endoftext|>
#  [50258, <lang>, 50359, 50363, ..., 50257] = <|startoftranscript|><|?lang?|><|transcribe|><|notimestamps|>...<|endoftext|>


def get_whisper(model_path:str) -> Tuple[WhisperForConditionalGeneration, WhisperProcessor]:
  """Load a Whisper model + processor for transcription-style inference.

  Args:
    model_path: a short size name ('tiny', 'base', ...) which is expanded to
      'openai/whisper-<size>', or a full 'openai/...' hub path.
  Returns:
    (model, processor) tuple, both placed on the global `device`.
  """
  if not model_path.startswith('openai'):
    model_path = f'openai/whisper-{model_path}'

  processor: WhisperProcessor = WhisperProcessor.from_pretrained(model_path, language=None, task='transcribe', no_timestamps=True, device_map=device)
  # NOTE: adding the missing lang tokens is intentionally disabled — growing the
  # vocab would require resizing the decoder's final Linear (lm_head) layer:
  #   T: WhisperTokenizer = processor.tokenizer
  #   T.add_tokens('<|zu|>', special_tokens=True)
  #   T.add_tokens('<|ky|>', special_tokens=True)

  model: WhisperForConditionalGeneration = WhisperForConditionalGeneration.from_pretrained(model_path, load_in_8bit=False, device_map=device)
  # hijack model config: let generate() pick tokens freely (no forced prefix, no suppression)
  model.config.forced_decoder_ids = None
  model.config.suppress_tokens = []

  print(model.config)
  return model, processor


# This variant shortens the model's input size for speed, but the pretrained weights lose score!!
# TODO: instead of truncating embed_positions' shape directly, should it be downsampled?
def get_whisper_hijack(model_path:str, n_frames:int=128) -> Tuple[WhisperForConditionalGeneration, WhisperProcessor]:
  """Load Whisper with the feature extractor and encoder truncated to `n_frames` mel frames.

  Args:
    model_path: short size name or full 'openai/...' hub path (see get_whisper).
    n_frames: number of mel frames to keep (default 128 ≈ 1.28 s of audio).
  Returns:
    (model, processor) tuple, both placed on the global `device`.
  """
  if not model_path.startswith('openai'):
    model_path = f'openai/whisper-{model_path}'

  processor: WhisperProcessor = WhisperProcessor.from_pretrained(model_path, device_map=device)
  # hijack processor config: limit audio length
  FE: WhisperFeatureExtractor = processor.feature_extractor
  FE.n_samples = FE.hop_length * n_frames
  FE.nb_max_frames = FE.n_samples // FE.hop_length  # == n_frames; 128 (=1.28s) is enough 🤔
  print('>> n_samples:', FE.n_samples)
  print('>> nb_max_frames:', FE.nb_max_frames)
  # NOTE: adding the missing lang tokens is intentionally disabled — growing the
  # vocab would require resizing the decoder's final Linear (lm_head) layer:
  #   T: WhisperTokenizer = processor.tokenizer
  #   T.add_tokens('<|zu|>', special_tokens=True)
  #   T.add_tokens('<|ky|>', special_tokens=True)

  model: WhisperForConditionalGeneration = WhisperForConditionalGeneration.from_pretrained(model_path, device_map=device)
  # hijack model config: free generation + shrink the encoder's positional table
  model.config.forced_decoder_ids = None
  model.config.suppress_tokens = []
  model_encoder = model.get_encoder()
  # the two stem convs downsample time; nb_max_frames = max_source_positions * 2 — TODO confirm stride product
  model.config.max_source_positions = FE.nb_max_frames // (model_encoder.conv1.stride[0] * model_encoder.conv2.stride[0])
  # truncate (not interpolate) the positional embeddings — see TODO above
  model_encoder.embed_positions.weight.data = model_encoder.embed_positions.weight.data[:model.config.max_source_positions]

  print(model.config)
  return model, processor


# For fine-tuning, the hijacked (truncated-input) variant works a bit better
#get_whisper_auto = get_whisper
get_whisper_auto = get_whisper_hijack
