import numpy as np
# import pyaudio
import tensorflow as tf
from datasets import audio
from infolog import log
from tacotron.models import create_model
from tacotron.utils.text import text_to_sequence


class Synthesizer:
	"""Builds a Tacotron inference graph and synthesizes waveforms from text.

	Usage: call `load()` once to construct the graph and restore a checkpoint,
	then `evals()` to synthesize a batch of texts into one concatenated waveform.
	"""

	def load(self, checkpoint_path, hparams, gta=False, model_name='Tacotron'):
		"""Construct the Tacotron graph and restore weights from `checkpoint_path`.

		Args:
			checkpoint_path: path of the TensorFlow checkpoint to restore.
			hparams: hyper-parameter object (num_mels, num_freq, tacotron_num_gpus,
				predict_linear, GL_on_GPU, symmetric_mels, max_abs_value, ...).
			gta: if True, build in Ground-Truth-Aligned mode (mel targets are fed in).
			model_name: model name passed through to `create_model`.
		"""
		log('Constructing model: %s' % model_name)
		#Force the batch size to be known in order to use attention masking in batch synthesis
		inputs = tf.placeholder(tf.int32, (None, None), name='inputs')
		input_lengths = tf.placeholder(tf.int32, (None), name='input_lengths')
		targets = tf.placeholder(tf.float32, (None, None, hparams.num_mels), name='mel_targets')
		split_infos = tf.placeholder(tf.int32, shape=(hparams.tacotron_num_gpus, None), name='split_infos')
		with tf.variable_scope('Tacotron_model', reuse=tf.AUTO_REUSE) as scope:
			self.model = create_model(model_name, hparams)
			if gta:
				self.model.initialize(inputs, input_lengths, targets, gta=gta, split_infos=split_infos)
			else:
				self.model.initialize(inputs, input_lengths, split_infos=split_infos)

			self.mel_outputs = self.model.tower_mel_outputs
			#Linear spectrogram prediction is only available outside GTA mode
			self.linear_outputs = self.model.tower_linear_outputs if (hparams.predict_linear and not gta) else None
			self.alignments = self.model.tower_alignments
			self.stop_token_prediction = self.model.tower_stop_token_prediction

		if hparams.GL_on_GPU:
			#Build Griffin-Lim inversion ops once so synthesis can run them on the GPU
			self.GLGPU_mel_inputs = tf.placeholder(tf.float32, (None, hparams.num_mels), name='GLGPU_mel_inputs')
			self.GLGPU_lin_inputs = tf.placeholder(tf.float32, (None, hparams.num_freq), name='GLGPU_lin_inputs')

			self.GLGPU_mel_outputs = audio.inv_mel_spectrogram_tensorflow(self.GLGPU_mel_inputs, hparams)
			self.GLGPU_lin_outputs = audio.inv_linear_spectrogram_tensorflow(self.GLGPU_lin_inputs, hparams)

		self.gta = gta
		self._hparams = hparams
		#pad input sequences with the <pad_token> 0 ( _ )
		self._pad = 0
		#explicitly setting the padding to a value that doesn't originally exist in the spectrogram
		#to avoid any possible conflicts, without affecting the output range of the model too much
		if hparams.symmetric_mels:
			self._target_pad = -hparams.max_abs_value
		else:
			self._target_pad = 0.

		self.inputs = inputs
		self.input_lengths = input_lengths
		self.targets = targets
		self.split_infos = split_infos

		log('Loading checkpoint: %s' % checkpoint_path)
		#Memory allocation on the GPUs as needed
		config = tf.ConfigProto()
		config.gpu_options.allow_growth = True
		config.allow_soft_placement = True

		self.session = tf.Session(config=config)
		self.session.run(tf.global_variables_initializer())

		saver = tf.train.Saver()
		saver.restore(self.session, checkpoint_path)

	def evals(self, texts):
		"""Synthesize `texts` and return all waveforms concatenated into one 1-D array.

		Args:
			texts: list of input strings; its length must be a multiple of
				`hparams.tacotron_num_gpus`.
		Returns:
			1-D numpy float array: the synthesized waveforms, concatenated in order.
		"""
		hparams = self._hparams
		cleaner_names = [x.strip() for x in hparams.cleaners.split(',')]
		# [-max, max] or [0, max]
		T2_output_range = (-hparams.max_abs_value, hparams.max_abs_value) if hparams.symmetric_mels else (0, hparams.max_abs_value)

		assert 0 == len(texts) % hparams.tacotron_num_gpus
		seqs = [np.asarray(text_to_sequence(text, cleaner_names)) for text in texts]
		input_lengths = [len(seq) for seq in seqs]

		size_per_device = len(seqs) // hparams.tacotron_num_gpus

		#Pad inputs according to each GPU max length
		input_seqs = None
		split_infos = []
		for i in range(hparams.tacotron_num_gpus):
			device_input = seqs[size_per_device * i: size_per_device * (i + 1)]
			device_input, max_seq_len = self._prepare_inputs(device_input)
			input_seqs = np.concatenate((input_seqs, device_input), axis=1) if input_seqs is not None else device_input
			split_infos.append([max_seq_len, 0, 0, 0])

		feed_dict = {
			self.inputs: input_seqs,
			self.input_lengths: np.asarray(input_lengths, dtype=np.int32),
			self.split_infos: np.asarray(split_infos, dtype=np.int32),
		}

		#Only fetch linear outputs when the graph produces them; self.linear_outputs
		#is None in GTA mode or when predict_linear is off, and session.run would
		#fail on a None fetch.
		if self.linear_outputs is not None:
			linears, mels, alignments, stop_tokens = self.session.run(
				[self.linear_outputs, self.mel_outputs, self.alignments, self.stop_token_prediction],
				feed_dict=feed_dict)
			#Linearize outputs (flatten the per-GPU nesting)
			linears = [linear for gpu_linear in linears for linear in gpu_linear]
		else:
			mels, alignments, stop_tokens = self.session.run(
				[self.mel_outputs, self.alignments, self.stop_token_prediction],
				feed_dict=feed_dict)
			linears = None

		mels = [mel for gpu_mels in mels for mel in gpu_mels]
		stop_tokens = [token for gpu_token in stop_tokens for token in gpu_token]

		#Natural batch synthesis:
		#get Mel/Linear lengths for the entire batch from stop_tokens predictions
		target_lengths = self._get_output_lengths(stop_tokens)

		#Take off the batch wise padding. Clip each spectrogram individually:
		#trimmed outputs have different lengths, so a single ragged np.clip would fail.
		mels = [np.clip(mel[:target_length, :], T2_output_range[0], T2_output_range[1])
			for mel, target_length in zip(mels, target_lengths)]
		assert len(mels) == len(texts)
		if linears is not None:
			linears = [np.clip(linear[:target_length, :], T2_output_range[0], T2_output_range[1])
				for linear, target_length in zip(linears, target_lengths)]
			assert len(linears) == len(texts)

		results = []
		for i, mel in enumerate(mels):
			if hparams.predict_linear and linears is not None:
				#linear spectrogram -> wav
				if hparams.GL_on_GPU:
					wav = self.session.run(self.GLGPU_lin_outputs, feed_dict={self.GLGPU_lin_inputs: linears[i]})
					wav = audio.inv_preemphasis(wav, hparams.preemphasis, hparams.preemphasize)
				else:
					wav = audio.inv_linear_spectrogram(linears[i].T, hparams)
			else:
				#mel spectrogram -> wav. This branch was missing before, so `wav`
				#was unbound (NameError) whenever predict_linear was disabled.
				if hparams.GL_on_GPU:
					wav = self.session.run(self.GLGPU_mel_outputs, feed_dict={self.GLGPU_mel_inputs: mel})
					wav = audio.inv_preemphasis(wav, hparams.preemphasis, hparams.preemphasize)
				else:
					wav = audio.inv_mel_spectrogram(mel.T, hparams)
			results.append(wav)
		return np.concatenate(results)

	def _round_up(self, x, multiple):
		"""Round `x` up to the nearest multiple of `multiple`."""
		remainder = x % multiple
		return x if remainder == 0 else x + multiple - remainder

	def _prepare_inputs(self, inputs):
		"""Right-pad every sequence to the batch max length.

		Returns (stacked 2-D array, max sequence length).
		"""
		max_len = max(len(x) for x in inputs)
		return np.stack([self._pad_input(x, max_len) for x in inputs]), max_len

	def _pad_input(self, x, length):
		"""Right-pad a 1-D token sequence to `length` with the pad token."""
		return np.pad(x, (0, length - x.shape[0]), mode='constant', constant_values=self._pad)

	def _prepare_targets(self, targets, alignment):
		"""Pad spectrogram targets to a common length, rounded up to a multiple
		of `alignment` (the model's output reduction factor).

		Returns (stacked 3-D array, padded length).
		"""
		max_len = max(len(t) for t in targets)
		data_len = self._round_up(max_len, alignment)
		return np.stack([self._pad_target(t, data_len) for t in targets]), data_len

	def _pad_target(self, t, length):
		"""Pad a (time, num_mels) target along the time axis with the target pad value."""
		return np.pad(t, [(0, length - t.shape[0]), (0, 0)], mode='constant', constant_values=self._target_pad)

	def _get_output_lengths(self, stop_tokens):
		"""Determine each mel length from the stop token predictions:
		length = first occurrence of a (rounded) 1 in the row, else the full row length."""
		return [row.index(1) if 1 in row else len(row) for row in np.round(stop_tokens).tolist()]
