#!/bin/python

# Code adapted from https://github.com/DanielLin94144/End-to-End-jointCTC-Attention-ASR/blob/master/building_an_end_to_end_speech_recognition_model_in_pytorch.py

import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset
import numpy as np
import torchaudio
import os
import torch
import wave

def _levenshtein_distance(ref,hyp):
	m=len(ref)
	n=len(hyp)
	if ref==hyp:
		return 0
	if m==0:
		return n
	if n==0:
		return m
	if m<n:
		ref,hyp=hyp,ref
		m,n=n,m
	distance=np.zeros((2,n+1),dtype=np.int32)
	for j in range(0,n+1):
		distance[0][j]=j
	for i in range(1,m+1):
		prev_row_idx=(i-1)%2
		cur_row_idx=i%2
		distance[cur_row_idx][0]=i
		for j in range(1,n+1):
			if ref[i-1]==hyp[j-1]:
				distance[cur_row_idx][j]=distance[prev_row_idx][j-1]
			else:
				s_num=distance[prev_row_idx][j-1]+1
				i_num=distance[cur_row_idx][j-1]+1
				d_num=distance[prev_row_idx][j]+1
				distance[cur_row_idx][j]=min(s_num,i_num,d_num)
	return distance[m%2][n]

def word_errors(reference,hypothesis,ignore_case=False,delimiter=' '):
	"""Return (word_edit_distance, reference_word_count) between two transcripts.

	reference/hypothesis are split on `delimiter`; with ignore_case both
	sides are lower-cased first.
	"""
	# Fixed `if ignore_case==True` — never compare to True with == (PEP 8).
	if ignore_case:
		reference=reference.lower()
		hypothesis=hypothesis.lower()
	ref_words=reference.split(delimiter)
	hyp_words=hypothesis.split(delimiter)
	edit_distance=_levenshtein_distance(ref_words,hyp_words)
	return float(edit_distance),len(ref_words)

def char_errors(reference,hypothesis,ignore_case=False,remove_space=False):
	"""Return (char_edit_distance, reference_length) between two transcripts.

	Runs of spaces are collapsed to a single space; with remove_space all
	spaces are removed before comparing.
	"""
	# Fixed `if ... ==True` comparisons — never compare to True with == (PEP 8).
	if ignore_case:
		reference=reference.lower()
		hypothesis=hypothesis.lower()
	join_char='' if remove_space else ' '
	reference=join_char.join(filter(None,reference.split(' ')))
	hypothesis=join_char.join(filter(None,hypothesis.split(' ')))
	edit_distance=_levenshtein_distance(reference,hypothesis)
	return float(edit_distance),len(reference)

def wer(reference,hypothesis,ignore_case=False,delimiter=' '):
	"""Word error rate: word-level edit distance divided by reference word count."""
	edit_distance,ref_len=word_errors(reference,hypothesis,ignore_case,delimiter)
	if ref_len==0:
		raise ValueError('Reference\'s word number should be greater than 0.')
	return float(edit_distance)/ref_len

def cer(reference,hypothesis,ignore_case=False,remove_space=False):
	"""Character error rate: char-level edit distance divided by reference length."""
	edit_distance,ref_len=char_errors(reference,hypothesis,ignore_case,remove_space)
	if ref_len==0:
		raise ValueError('Length of reference should be greater than 0.')
	return float(edit_distance)/ref_len

class TrainDataset(Dataset):
	"""First 80% (by sorted name) of the transcribed clips, loaded eagerly into memory.

	transcriptions.txt holds one "<audio_name> <transcript>" pair per line.
	Each item is (waveform, transcript) where waveform is a (1, num_samples)
	float32 tensor of raw 16-bit PCM values.
	"""
	def __init__(self):
		self.dataset_dir='/home/tellw/dataset/librispeech_dev_completion/train'  # NOTE(review): hard-coded path
		transcriptions={}
		with open(f'{self.dataset_dir}/transcriptions.txt','r',encoding='utf8') as f:
			for line in f.read().strip().split('\n'):
				name,text=line.split(' ',1)
				transcriptions[name]=text
		# Sort by audio name so the 80/20 train/test split is deterministic.
		transcriptions=dict(sorted(transcriptions.items(),key=lambda kv:kv[0]))
		split=int(len(transcriptions)*0.8)  # was len(transcriptions.items()) — same value, clearer
		train_audio_names=list(transcriptions.keys())[:split]
		self.train_transcriptions=list(transcriptions.values())[:split]
		self.train_audio_datas=[]
		for tan in train_audio_names:
			# `with` guarantees the wav handle is closed even if readframes raises.
			with wave.open(f'{self.dataset_dir}/{tan}.wav','rb') as wav:
				str_data=wav.readframes(wav.getnframes())
			# np.int16 instead of the legacy np.short alias; assumes 16-bit mono PCM — TODO confirm.
			self.train_audio_datas.append(torch.tensor(np.frombuffer(str_data,dtype=np.int16).reshape(1,-1),dtype=torch.float32))

	def __getitem__(self,index):
		return self.train_audio_datas[index],self.train_transcriptions[index]

	def __len__(self):
		return len(self.train_audio_datas)

class TestDataset(Dataset):
	"""Last 20% (by sorted name) of the transcribed clips, loaded eagerly into memory.

	Mirrors TrainDataset: items are (waveform, transcript) with waveform a
	(1, num_samples) float32 tensor of raw 16-bit PCM values.
	"""
	def __init__(self):
		self.dataset_dir='/home/tellw/dataset/librispeech_dev_completion/test'  # NOTE(review): hard-coded path
		transcriptions={}
		with open(f'{self.dataset_dir}/transcriptions.txt','r',encoding='utf8') as f:
			for line in f.read().strip().split('\n'):
				name,text=line.split(' ',1)
				transcriptions[name]=text
		# Sort by audio name so the 80/20 split is deterministic.
		transcriptions=dict(sorted(transcriptions.items(),key=lambda kv:kv[0]))
		split=int(len(transcriptions)*0.8)  # was len(transcriptions.items()) — same value, clearer
		test_audio_names=list(transcriptions.keys())[split:]
		self.test_transcriptions=list(transcriptions.values())[split:]
		self.test_audio_datas=[]
		for tan in test_audio_names:
			# `with` guarantees the wav handle is closed even if readframes raises.
			with wave.open(f'{self.dataset_dir}/{tan}.wav','rb') as wav:
				str_data=wav.readframes(wav.getnframes())
			# np.int16 instead of the legacy np.short alias; assumes 16-bit mono PCM — TODO confirm.
			self.test_audio_datas.append(torch.tensor(np.frombuffer(str_data,dtype=np.int16).reshape(1,-1),dtype=torch.float32))

	def __getitem__(self,index):
		return self.test_audio_datas[index],self.test_transcriptions[index]

	def __len__(self):
		return len(self.test_audio_datas)

class TextTransform:
	"""Bidirectional mapping between characters and integer labels.

	29 symbols total: apostrophe, space and a-z map to 0-27; index 28 is
	left free for the CTC blank used elsewhere in this file.
	"""
	def __init__(self):
		char_map_str="""
		' 0
		<SPACE> 1
		a 2
		b 3
		c 4
		d 5
		e 6
		f 7
		g 8
		h 9
		i 10
		j 11
		k 12
		l 13
		m 14
		n 15
		o 16
		p 17
		q 18
		r 19
		s 20
		t 21
		u 22
		v 23
		w 24
		x 25
		y 26
		z 27
		"""
		self.char_map={}
		self.index_map={}
		for line in char_map_str.strip().split('\n'):
			symbol,idx=line.split()
			idx=int(idx)
			self.char_map[symbol]=idx
			self.index_map[idx]=symbol
		# Decode index 1 straight to a real space instead of the '<SPACE>' token.
		self.index_map[1]=' '

	def text_to_int(self,text):
		"""Map a transcript (expects lower-case letters, apostrophes, spaces) to label ints."""
		return [self.char_map['<SPACE>'] if c==' ' else self.char_map[c] for c in text]

	def int_to_text(self,labels):
		"""Map a sequence of label ints back to a text string."""
		decoded=[self.index_map[i] for i in labels]
		return ''.join(decoded).replace('<SPACE>',' ')

# Training-time feature pipeline: 128-bin mel spectrogram followed by
# SpecAugment-style frequency and time masking for data augmentation.
train_audio_transforms=nn.Sequential(
			torchaudio.transforms.MelSpectrogram(sample_rate=16000,n_mels=128),
			torchaudio.transforms.FrequencyMasking(freq_mask_param=30),
			torchaudio.transforms.TimeMasking(time_mask_param=100)
		)

# Validation pipeline: plain mel spectrogram (torchaudio defaults), no masking.
valid_audio_transforms=torchaudio.transforms.MelSpectrogram()

def fpfe(wav_signal,data_type='train'):
	"""Front-end feature extraction: raw waveform -> log-mel features of shape (time, n_mels).

	data_type 'train' uses the augmented pipeline, anything else the plain
	validation pipeline; log(1+x) compresses the dynamic range.
	"""
	transform=train_audio_transforms if data_type=='train' else valid_audio_transforms
	signal=torch.tensor(wav_signal.reshape(1,-1),dtype=torch.float32)
	return torch.log(transform(signal).squeeze(0).transpose(0,1)+1)

# Shared character<->integer codec used by the pre/post-processing helpers below.
text_transform=TextTransform()

def train_data_preprocessor(features,utterances):
	"""Collate (feature, utterance) pairs into padded batch tensors for CTC training.

	Returns (spectrograms, labels, (input_lengths, label_lengths)):
	spectrograms is (N, 1, T_max, n_feats); labels are padded with 28 (the
	index used as CTC blank in this file); input lengths are halved to
	match the stride-2 convolution at the front of the model.
	"""
	labels=[torch.Tensor(text_transform.text_to_int(u.lower())) for u in utterances]
	input_lengths=[f.shape[0]//2 for f in features]
	label_lengths=[len(l) for l in labels]
	spectrograms=nn.utils.rnn.pad_sequence(features,batch_first=True).unsqueeze(1)
	labels=nn.utils.rnn.pad_sequence(labels,batch_first=True,padding_value=28.0)
	return spectrograms,labels,(input_lengths,label_lengths)

def val_data_preprocessor(features,utterances):
	"""Collate validation features into a padded (N, 1, T_max, n_feats) batch.

	Utterances are passed through untouched; the third slot is None to keep
	the (spectrograms, targets, lengths) shape of the other preprocessors.
	"""
	padded=nn.utils.rnn.pad_sequence(features,batch_first=True)
	return padded.unsqueeze(1),utterances,None

def test_data_preprocessor(features,utterances):
	# spectrograms=nn.utils.rnn.pad_sequence(features,batch_first=True).unsqueeze(1).transpose(2,3)
	spectrograms=nn.utils.rnn.pad_sequence(features,batch_first=True).unsqueeze(1)
	return spectrograms,None,None

def get_loss_function(device):
	"""Build a CTC loss closure loss(data, outputs).

	data is (spectrograms, labels, (input_lengths, label_lengths)) as
	produced by train_data_preprocessor; outputs are raw logits of shape
	(N, T, C). Index 28 is the CTC blank.
	"""
	criterion=nn.CTCLoss(blank=28).to(device)
	def loss(data,outputs):
		# CTCLoss wants log-probabilities shaped (T, N, C).
		log_probs=F.log_softmax(outputs,dim=2).transpose(0,1)
		return criterion(log_probs,data[1],data[2][0],data[2][1])
	return loss

def post_processor(outputs,blank_label=28,collapse_repeated=True):
	"""Greedy CTC decode of raw logits (N, T, C) into upper-case transcripts.

	Takes the argmax symbol per frame, drops blanks and (optionally)
	collapses consecutive repeats before mapping back to text.
	"""
	best_paths=torch.argmax(F.log_softmax(outputs,dim=2),dim=2)
	decodes=[]
	for path in best_paths:
		indices=[]
		for step,idx in enumerate(path):
			if idx==blank_label:
				continue
			if collapse_repeated and step!=0 and idx==path[step-1]:
				continue
			indices.append(idx.item())
		decodes.append(text_transform.int_to_text(indices).upper())
	return decodes

class SpeechFeatureMeta():
    '''
    Base class for acoustic feature extractors.

    Subclasses override run() to turn a raw waveform into a feature matrix.
    '''
    def __init__(self,framerate=16000):
        # Default sampling rate in Hz.
        self.framerate=framerate

    def run(self,wavsignal,fs=16000):
        raise NotImplementedError('run() method is not implemented')

import random
from scipy.fftpack import fft

class SpecAugment(SpeechFeatureMeta):
    '''
    Re-implementation of Google's SpecAugment augmentation on top of a plain
    spectrogram feature: randomly zeroes time and/or frequency stripes.
    '''
    def __init__(self,framerate=16000,timewindow=16,timeshift=10):
        # Analysis window length (ms) and hop (ms), converted to samples below.
        self.time_window=timewindow
        self.window_length=int(framerate/1000*self.time_window)
        self.timeshift=timeshift
        # Hamming window coefficients, precomputed once.
        self.x=np.linspace(0,self.window_length-1,self.window_length,dtype=np.int16)
        self.w=0.54-0.46*np.cos(2*np.pi*self.x/(self.window_length-1))
        super().__init__(framerate)

    def run(self,wavsignal,samplerate=16000):
        """Return a randomly masked log-magnitude spectrogram of wavsignal, shape (frames, window_length//2)."""
        self.framerate=samplerate
        n_frames=int(len(wavsignal)/self.framerate*1000-self.time_window)//self.timeshift+1
        hop=int(self.framerate/1000*self.timeshift)
        data_input=np.zeros((n_frames,self.window_length//2),dtype=np.float64)
        for frame in range(n_frames):
            start=frame*hop
            windowed=wavsignal[start:start+self.window_length]*self.w
            spectrum=np.abs(fft(windowed))
            # Keep only the first half of the (symmetric) magnitude spectrum.
            data_input[frame]=spectrum[0:self.window_length//2]
        data_input=np.log(data_input+1)
        # All random parameters are drawn up front, in a fixed order.
        mode=random.randint(1,100)
        h_start=random.randint(1,data_input.shape[0])
        h_width=random.randint(1,100)
        v_start=random.randint(1,data_input.shape[1])
        v_width=random.randint(1,100)
        if mode<=60:
            pass  # 60%: leave the spectrogram untouched
        elif 60<mode<=75:
            data_input[h_start:h_start+h_width,:]=0  # mask a band of frames (rows = time)
        elif 75<mode<=90:
            data_input[:,v_start:v_start+v_width]=0  # mask a band of frequency bins
        else:
            data_input[h_start:h_start+h_width,v_start:v_start+v_width]=0  # mask both
        return data_input

# Module-level augmenter instance (referenced only by commented-out code in data_processing).
augment=SpecAugment()

def data_processing(data,data_type='train'):
	"""DataLoader collate_fn: turn (waveform, utterance) pairs into padded CTC batches.

	Returns (spectrograms, labels, input_lengths, label_lengths):
	spectrograms is (N, 1, T_max, n_mels); labels are padded with 28 (the
	index used as CTC blank in this file); input lengths are halved to
	account for the stride-2 convolution at the front of the model.
	"""
	spectrograms,labels,input_lengths,label_lengths=[],[],[],[]
	for waveform,utterance in data:
		if data_type=='train':
			transform=train_audio_transforms
		elif data_type=='valid':
			transform=valid_audio_transforms
		else:
			raise Exception('data_type should be train or valid')
		# log(1+x) compression of the mel spectrogram, shaped (time, n_mels).
		spec=torch.log(transform(waveform).squeeze(0).transpose(0,1)+1)
		spectrograms.append(spec)
		label=torch.Tensor(text_transform.text_to_int(utterance.lower()))
		labels.append(label)
		input_lengths.append(spec.shape[0]//2)
		label_lengths.append(len(label))
	spectrograms=nn.utils.rnn.pad_sequence(spectrograms,batch_first=True).unsqueeze(1)
	labels=nn.utils.rnn.pad_sequence(labels,batch_first=True,padding_value=28.0)
	return spectrograms,labels,input_lengths,label_lengths

def GreedyDecoder(output,labels,label_lengths,blank_label=28,collapse_repeated=True):
	"""Greedy CTC decoding plus reference reconstruction.

	output: (N, T, C) scores; labels/label_lengths hold the padded reference
	label sequences. Returns (decoded_strings, target_strings).
	"""
	best_paths=torch.argmax(output,dim=2)
	decodes,targets=[],[]
	for i,path in enumerate(best_paths):
		# Rebuild the reference text from the unpadded label prefix.
		targets.append(text_transform.int_to_text(labels[i][:label_lengths[i]].tolist()))
		decoded_indices=[]
		for t,idx in enumerate(path):
			if idx==blank_label:
				continue
			if collapse_repeated and t!=0 and idx==path[t-1]:
				continue
			decoded_indices.append(idx.item())
		decodes.append(text_transform.int_to_text(decoded_indices))
	return decodes,targets

class CNNLayerNorm(nn.Module):
	"""LayerNorm over the trailing (feature) dimension of a 4-D CNN activation."""
	def __init__(self,n_feats):
		super(CNNLayerNorm,self).__init__()
		self.layer_norm=nn.LayerNorm(n_feats)

	def forward(self,x):
		# nn.LayerNorm normalises the last dimension directly, so no
		# transposes are required as long as features come last.
		return self.layer_norm(x)

class ResidualCNN(nn.Module):
	"""Residual block: two (LayerNorm -> GELU -> Dropout -> Conv2d) stages plus a skip connection.

	padding=kernel//2 with stride 1 keeps spatial size unchanged, which is
	what makes the final skip addition shape-compatible.
	"""
	def __init__(self,in_channels,out_channels,kernel,stride,dropout,n_feats):
		super(ResidualCNN,self).__init__()
		self.cnn1=nn.Conv2d(in_channels,out_channels,kernel,stride,padding=kernel//2)
		self.cnn2=nn.Conv2d(out_channels,out_channels,kernel,stride,padding=kernel//2)
		self.dropout1=nn.Dropout(dropout)
		self.dropout2=nn.Dropout(dropout)
		self.layer_norm1=CNNLayerNorm(n_feats)
		self.layer_norm2=CNNLayerNorm(n_feats)

	def forward(self,x):
		residual=x
		stages=((self.layer_norm1,self.dropout1,self.cnn1),(self.layer_norm2,self.dropout2,self.cnn2))
		for norm,drop,conv in stages:
			x=conv(drop(F.gelu(norm(x))))
		return x+residual

class BidirectionalGRU(nn.Module):
	"""Pre-norm bidirectional GRU layer: LayerNorm -> GELU -> BiGRU -> Dropout.

	Output feature size is 2*hidden_size (both directions concatenated).
	"""
	def __init__(self,rnn_dim,hidden_size,dropout,batch_first):
		super(BidirectionalGRU,self).__init__()
		self.BiGRU=nn.GRU(input_size=rnn_dim,hidden_size=hidden_size,num_layers=1,batch_first=batch_first,bidirectional=True)
		self.layer_norm=nn.LayerNorm(rnn_dim)
		self.dropout=nn.Dropout(dropout)

	def forward(self,x):
		normed=F.gelu(self.layer_norm(x))
		rnn_out,_=self.BiGRU(normed)
		return self.dropout(rnn_out)

def print_tensor(x,shape,sl):
	"""Pretty-print a rank-4 tensor as nested bracketed lists of 2-decimal values.

	sl is the tensor rank; anything other than 4 prints nothing.
	"""
	if sl!=4:
		return
	print('[ ',end='')
	for i in range(shape[0]):
		print('[ ',end='')
		for j in range(shape[1]):
			print('[ ',end='')
			for k in range(shape[2]):
				print('[ ',end='')
				# Innermost row: values separated by ', ', row closed with a newline.
				row=''.join(f'{x[i,j,k,l].item():.2f}, ' for l in range(shape[3]))
				print(row,end='')
				print(' ]')
			print(' ]',end='')
		print(' ]',end='')
	print(' ]',end='')

def cal_cos_sim(x,shape):
	"""Debug helper: plot the first feature row of x against its least-similar row.

	Indexes x as x[0,0,i,:], i.e. assumes a (batch, channel, time, feats)
	layout — TODO confirm with callers. Writes the figure to a hard-coded
	debug path.
	"""
	def single_cos_sim(x1,y):
		# Cosine similarity of two 1-D vectors.
		return np.dot(x1,y)/(np.linalg.norm(x1)*np.linalg.norm(y))
	x1=x[0,0,0,:].cpu().numpy()
	min_sim=1
	difference_feature=None
	for i in range(1,shape[2]):
		sim=single_cos_sim(x1,x[0,0,i,:].cpu().numpy())
		if sim<min_sim:
			# BUG FIX: was `min_sim=min_sim`, which never updated the running
			# minimum, so the row kept was the *last* one with sim<1 rather
			# than the least-similar one.
			min_sim=sim
			difference_feature=x[0,0,i,:].cpu().numpy()
	import matplotlib.pyplot as plt
	plt.plot(np.arange(len(x1)),x1)
	# Guard: if every row had sim>=1 no candidate was found; skip the second curve.
	if difference_feature is not None:
		plt.plot(np.arange(len(x1)),difference_feature)
	plt.savefig('/home/tellw/share/test.jpg')

class SpeechRecognitionModel(nn.Module):
	"""CNN front end + residual CNN stack + bidirectional GRU stack + classifier head.

	Input: (batch, 1, time, n_feats) log-mel spectrograms. Output:
	(batch, time', n_class) logits, with time' reduced by the first
	convolution's stride.
	"""
	def __init__(self,n_cnn_layers,n_rnn_layers,rnn_dim,n_class,n_feats,stride=2,dropout=0.1):
		super(SpeechRecognitionModel,self).__init__()
		n_feats=n_feats//2  # the feature axis is halved by the stride of the first conv (default 2)
		self.cnn=nn.Conv2d(1,32,3,stride=stride,padding=3//2)
		self.rescnn_layers=nn.Sequential(*[ResidualCNN(32,32,kernel=3,stride=1,dropout=dropout,n_feats=n_feats) for _ in range(n_cnn_layers)])
		self.fully_connected=nn.Linear(n_feats*32,rnn_dim)
		# First GRU layer consumes rnn_dim; later ones consume 2*rnn_dim (bidirectional output).
		self.birnn_layers=nn.Sequential(*[BidirectionalGRU(rnn_dim=rnn_dim if i==0 else rnn_dim*2,hidden_size=rnn_dim,dropout=dropout,batch_first=i==0) for i in range(n_rnn_layers)])
		self.classifier=nn.Sequential(nn.Linear(rnn_dim*2,rnn_dim),nn.GELU(),nn.Dropout(dropout),nn.Linear(rnn_dim,n_class))

	def forward(self,x):
		x=self.rescnn_layers(self.cnn(x))
		# (batch, channel, time, feats) -> (batch, time, channel, feats),
		# then flatten (channel, feats) so each time step is one vector.
		x=x.transpose(1,2)
		batch,time,channels,feats=x.size()
		x=x.reshape(batch,time,channels*feats)
		x=self.fully_connected(x)
		x=self.birnn_layers(x)
		return self.classifier(x)

class IterMeter(object):
	"""Counts total training iterations across all epochs."""
	def __init__(self):
		# Number of optimizer steps taken so far.
		self.val=0

	def step(self):
		self.val=self.val+1

	def get(self):
		return self.val

def train(model,device,train_loader,criterion,optimizer,scheduler,epoch,iter_meter):
	"""Run one training epoch: forward pass, CTC loss, backward, optimizer + per-batch LR step.

	Logs progress every 100 batches and checkpoints the model to
	pytorch_ctc_joint_attention_asr_model.pth at each log point.
	"""
	model.train()
	data_len=len(train_loader.dataset)  # number of samples, not batches
	for batch_idx,_data in enumerate(train_loader):
		# print(f'training iteration {batch_idx}')
		spectrograms,labels,input_lengths,label_lengths=_data
		spectrograms,labels=spectrograms.to(device),labels.to(device)
		optimizer.zero_grad()
		output=model(spectrograms)
		# print('e2e-joint-ctc-attention-477',spectrograms,output)
		# CTCLoss expects log-probabilities shaped (time, batch, n_class).
		output=F.log_softmax(output,dim=2)
		output=output.transpose(0,1)
		loss=criterion(output,labels,input_lengths,label_lengths)
		# for name,parms in model.named_parameters():
			# print('e2e-joint-ctc-attention-372,name,para,grad_requires,grad_value',name,parms,parms.requires_grad,parms.grad)
			# if parms.grad is None:
			# 	tip='None'
			# else:
			# 	tip=(parms.grad==0.).all()
			# with open('test.txt','a',encoding='utf8') as f:
			# 	f.write(f'e2e-joint-ctc-attention-372,name,para,grad_requires,grad_value,{name},{parms},{parms.requires_grad},{parms.grad},{tip}')
		loss.backward()
		optimizer.step()
		scheduler.step()  # OneCycleLR is stepped per batch, not per epoch
		iter_meter.step()
		# import sys;sys.exit()
		# NOTE(review): batch_idx is a batch index while data_len counts samples,
		# so `batch_idx==data_len` almost never matches the intended "last batch".
		if batch_idx%100==0 or batch_idx==data_len:
			print(f'Train Epoch: {epoch} [{batch_idx*len(spectrograms)}/{data_len}] ({100.*batch_idx/len(train_loader):.0f}%)\tLoss: {loss.item():.6f}')
			torch.save(model.state_dict(),'pytorch_ctc_joint_attention_asr_model.pth')
	# print('finished one epoch of training')

def test(model,device,test_loader,criterion,epoch,iter_meter):
	"""Evaluate the model on test_loader: average CTC loss plus CER/WER over all utterances.

	Prints every (target, prediction) pair and the aggregate metrics.
	"""
	print('\nevaluating...')
	model.eval()
	test_loss=0
	test_cer,test_wer=[],[]
	with torch.no_grad():
		for i,_data in enumerate(test_loader):
			spectrograms,labels,input_lengths,label_lengths=_data
			spectrograms,labels=spectrograms.to(device),labels.to(device)
			output=model(spectrograms)
			# CTCLoss expects log-probabilities shaped (time, batch, n_class).
			output=F.log_softmax(output,dim=2)
			output=output.transpose(0,1)
			loss=criterion(output,labels,input_lengths,label_lengths)
			# Accumulate the per-batch loss divided by batch count -> mean loss.
			test_loss+=loss.item()/len(test_loader)
			# GreedyDecoder wants (batch, time, n_class), so transpose back.
			decoded_preds,decoded_targets=GreedyDecoder(output.transpose(0,1),labels,label_lengths)
			for j in range(len(decoded_preds)):
				print(f'target:{decoded_targets[j]}, prediction:{decoded_preds[j]}')
				test_cer.append(cer(decoded_targets[j],decoded_preds[j]))
				test_wer.append(wer(decoded_targets[j],decoded_preds[j]))
	# NOTE(review): raises ZeroDivisionError if the loader yields no utterances.
	avg_cer=sum(test_cer)/len(test_cer)
	avg_wer=sum(test_wer)/len(test_wer)
	print(f'Test set: Average loss: {test_loss:.4f}, Average CER: {avg_cer:.4f}, Average WER: {avg_wer:.4f}\n')

def main(learning_rate=5e-4,batch_size=1,epochs=10,train_url='train-clean-100',test_url='test-clean'):
	"""Build the datasets, model, optimizer and scheduler, then train and evaluate per epoch.

	NOTE(review): train_url/test_url are currently unused — the dataset
	paths are hard-coded inside TrainDataset/TestDataset.
	"""
	# Model/optimisation hyper-parameters; n_class=29 covers ' + space + a-z + CTC blank.
	hparams={
		'n_cnn_layers':3,
		'n_rnn_layers':5,
		'rnn_dim':512,
		'n_class':29,
		'n_feats':128,
		'stride':2,
		'dropout':0.1,
		'learning_rate':learning_rate,
		'batch_size':batch_size,
		'epochs':epochs
	}
	use_cuda=torch.cuda.is_available()
	torch.manual_seed(7)  # fixed seed for reproducibility
	device=torch.device('cuda' if use_cuda else 'cpu')

	kwargs={'num_workers':1,'pin_memory':True} if use_cuda else {}

	train_dataset=TrainDataset()
	test_dataset=TestDataset()
	train_loader=torch.utils.data.DataLoader(dataset=train_dataset,batch_size=hparams['batch_size'],shuffle=True,collate_fn=lambda x:data_processing(x,'train'),**kwargs)
	test_loader=torch.utils.data.DataLoader(dataset=test_dataset,batch_size=hparams['batch_size'],shuffle=False,collate_fn=lambda x:data_processing(x,'valid'),**kwargs) # acoustic feature batch shape: 1,1,128,2000 (2000 varies; first dim is the batch size)

	model=SpeechRecognitionModel(hparams['n_cnn_layers'],hparams['n_rnn_layers'],hparams['rnn_dim'],hparams['n_class'],hparams['n_feats'],hparams['stride'],hparams['dropout']).to(device)
	# if os.path.exists('pytorch_ctc_joint_attention_asr_model.pth'):
	# 	model.load_state_dict(torch.load('pytorch_ctc_joint_attention_asr_model.pth'))
	print(model)
	print('Num Model Parameters',sum([param.nelement() for param in model.parameters()]))

	optimizer=optim.AdamW(model.parameters(),hparams['learning_rate'])
	criterion=nn.CTCLoss(blank=28).to(device)
	# OneCycleLR needs the total schedule length: steps_per_epoch * epochs.
	scheduler=optim.lr_scheduler.OneCycleLR(optimizer,max_lr=hparams['learning_rate'],steps_per_epoch=int(len(train_loader)),epochs=hparams['epochs'],anneal_strategy='linear')
	iter_meter=IterMeter()
	for epoch in range(1,epochs+1):
		train(model,device,train_loader,criterion,optimizer,scheduler,epoch,iter_meter)
		test(model,device,test_loader,criterion,epoch,iter_meter)

if __name__=='__main__':

	# Full-run hyper-parameters. The dataset-name arguments are passed through
	# but currently ignored by main() (paths are hard-coded in the Dataset classes).
	learning_rate=5e-4
	batch_size=4
	epochs=100
	libri_train_set='train-clean-100'
	libri_test_set='test-clean'

	main(learning_rate,batch_size,epochs,libri_train_set,libri_test_set)