#!/usr/bin/env python3

# Code adapted from https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-chinese-zh-cn
# and https://github.com/jonatasgrosman/wav2vec2-sprint/blob/main/run_common_voice.py

import torch
import wave
from transformers import Wav2Vec2ForCTC,Wav2Vec2Processor
import numpy as np
from pathlib import Path
import torch.nn as nn
from audiomentations import (
    Compose,
    AddGaussianNoise,
    Gain,
    PitchShift,
    TimeStretch,
)
import torch.optim as optim

# Run on GPU when one is available, otherwise fall back to CPU.
use_cuda=torch.cuda.is_available()
device=torch.device('cuda' if use_cuda else 'cpu')

# Shared feature-extractor/tokenizer pair, loaded once from a local checkpoint.
processor=Wav2Vec2Processor.from_pretrained('/home/tellw/models/e2ecm')

def get_model():
	"""Load the fine-tuned Wav2Vec2 CTC model from the local checkpoint in float64 precision."""
	model=Wav2Vec2ForCTC.from_pretrained('/home/tellw/models/e2ecm')
	return model.double()

def train_fpfe(wav_signal,sample_rate=16_000,apply_gaussian_noise_with_p=0.5,apply_gain_with_p=0.5,apply_pitch_shift_with_p=0.5,apply_time_stretch_with_p=0.5):
	"""Training-time front-end: randomly augment a raw waveform.

	Applies time-stretch, pitch-shift, gain and additive-Gaussian-noise
	augmentations, each fired independently with its own probability.

	Args:
		wav_signal: 1-D numpy array of audio samples (any numeric dtype).
		sample_rate: sampling rate in Hz (default 16 kHz).
		apply_*_with_p: per-augmentation probabilities; when they sum to 0
			no augmentation pipeline is built at all.

	Returns:
		A float32 numpy array. (Fix: the pass-through branch previously
		returned the input's original dtype, e.g. int16, while the augmented
		branch always returned float32 — callers now see a consistent dtype.)
	"""
	# Unify dtype up front so both branches return float32.
	wav_signal=wav_signal.astype(np.float32)
	if apply_gaussian_noise_with_p+apply_gain_with_p+apply_pitch_shift_with_p+apply_time_stretch_with_p>0:
		augmentator=Compose([
			TimeStretch(min_rate=0.8,max_rate=1.2,leave_length_unchanged=False,p=apply_time_stretch_with_p),
			PitchShift(min_semitones=-1,max_semitones=1,p=apply_pitch_shift_with_p),
			Gain(min_gain_in_db=-1,max_gain_in_db=1,p=apply_gain_with_p),
			AddGaussianNoise(min_amplitude=0.0001,max_amplitude=0.001,p=apply_gaussian_noise_with_p),
		])
		return augmentator(samples=wav_signal,sample_rate=sample_rate)
	return wav_signal

def test_fpfe(wav_signal):
	return wav_signal

def train_data_preprocessor(features,labels,padding=True,max_length=None,max_length_labels=None,pad_to_multiple_of=None,pad_to_multiple_of_labels=None):
	"""Collate raw waveforms and transcripts into padded tensors for training.

	Returns ((input_values, attention_mask, None, None, None, labels),
	labels, None) where padded label positions are set to -100 so the CTC
	loss ignores them.
	"""
	# Extract acoustic features and pad them into a single batch.
	feature_dicts=[{'input_values':processor(f,sampling_rate=16000).input_values[0]} for f in features]
	batch=processor.pad(feature_dicts,padding=padding,max_length=max_length,pad_to_multiple_of=pad_to_multiple_of,return_tensors='pt',)
	# Tokenize and pad transcripts with the processor in target (tokenizer) mode.
	with processor.as_target_processor():
		label_dicts=[{'input_ids':processor(t).input_ids} for t in labels]
		padded_labels=processor.pad(label_dicts,padding=padding,max_length=max_length_labels,pad_to_multiple_of=pad_to_multiple_of_labels,return_tensors='pt',)
	# Replace padding token ids with -100 so they are masked out of the loss.
	masked_labels=padded_labels['input_ids'].masked_fill(padded_labels.attention_mask.ne(1),-100)
	batch['labels']=masked_labels
	return (batch.input_values.double(),batch.attention_mask.double(),None,None,None,batch.labels),batch.labels,None

def val_data_preprocessor(features,labels):
	"""Collate raw waveforms into padded tensors for validation; labels pass through untouched."""
	encoded=[{'input_values':processor(f,sampling_rate=16000).input_values[0]} for f in features]
	padded=processor.pad(encoded,padding=True,return_tensors='pt')
	return (padded.input_values.double(),padded.attention_mask.double()),labels,None
	
def infer_data_preprocessor(features,labels):
	"""Collate raw waveforms into padded tensors for inference.

	*labels* is accepted only for interface parity with the other
	preprocessors and is ignored; no labels are produced at inference time.
	"""
	encoded=[{'input_values':processor(f,sampling_rate=16000).input_values[0]} for f in features]
	padded=processor.pad(encoded,padding=True,return_tensors='pt')
	return (padded.input_values.double(),padded.attention_mask.double()),None,None

def get_loss(device):
	"""Return a loss callable of the form loss(input_data, output_data).

	The callable just extracts element 0 of the model output (presumably
	the CTC loss computed by the model itself when labels are supplied —
	confirm against the trainer). *device* is unused but kept so the
	factory signature matches the rest of the training harness.
	"""
	def extract_loss(input_data,output_data):
		# First element of the output tuple is the precomputed loss.
		return output_data[0]
	return extract_loss

def get_optimizer(model,lr=0.001):
	"""Create an AdamW optimizer over all model parameters.

	Args:
		model: the nn.Module whose parameters will be optimized.
		lr: learning rate; defaults to 0.001, matching the previously
			hard-coded value, but is now configurable by callers.

	Returns:
		A torch.optim.AdamW instance.
	"""
	return optim.AdamW(model.parameters(),lr=lr)

def post_processor(output):
	"""Decode model logits into sentences via greedy (argmax) CTC decoding."""
	best_ids=output.logits.argmax(dim=-1)
	return processor.batch_decode(best_ids)

def recognize_file(fn):
	"""Transcribe one WAV file with greedy CTC decoding and print the result.

	Relies on the module-level `processor`, `model` and `device`.
	Assumes the file is 16 kHz mono signed 16-bit PCM — TODO: verify
	channels/rate from the wave header instead of assuming.
	"""
	# `with` closes the file even if reading raises (wave supports the
	# context-manager protocol since Python 3.4).
	with wave.open(fn,'rb') as wav:
		str_data=wav.readframes(wav.getnframes())
	# Interpret the raw frames as signed 16-bit samples (np.int16 == np.short, but explicit).
	speech_array=np.frombuffer(str_data,dtype=np.int16)
	inputs=processor(speech_array,sampling_rate=16_000,return_tensors='pt',padding=True).to(device)
	with torch.no_grad():
		# Fix: the model is loaded with .double() (float64) while the processor
		# emits float32 tensors; cast the inputs to match the model's dtype.
		logits=model(inputs.input_values.double(),attention_mask=inputs.attention_mask).logits
	predicted_ids=torch.argmax(logits,dim=-1)
	predicted_sentences=processor.batch_decode(predicted_ids)
	print(f'{fn},{predicted_sentences}')

if __name__=='__main__':

	# Wav2Vec2ForCTC -> Wav2Vec2PreTrainedModel -> PreTrainedModel -> nn.Module instance.
	# Load in float64 precision and move to the selected device.
	model=Wav2Vec2ForCTC.from_pretrained('/home/tellw/models/e2ecm').double().to(device)
	# Transcribe every WAV file in the dataset directory.
	for entry in Path('/home/tellw/dataset/asr').glob('*.wav'):
		recognize_file(str(entry))  # str(entry), not entry.__str__()