'''
input_data module.
Utilities for loading, partitioning, and preprocessing the audio training data.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import hashlib
import math
import os.path
import random
import re
import sys
import tarfile

import numpy as np
from six.moves import urllib
from six.moves import xrange 
import tensorflow as tf

from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat

MAX_NUM_WAVS_PER_CLASS = 2**27 - 1  # ~134M; modulus used to bucket filename hashes in which_set().
SILENCE_LABEL = '_silence_'  # Label assigned to injected silence samples.
SILENCE_INDEX = 0  # Silence is always class index 0.
UNKNOWN_WORD_LABEL = '_unknown_'  # Label for words outside the wanted list.
UNKNOWN_WORD_INDEX = 1  # Unknown words are always class index 1.
BACKGROUND_NOISE_DIR_NAME = '_background_noise_'  # Folder holding noise .wav clips, not word samples.
RANDOM_SEED = 59185  # Fixed seed so shuffling/partition mixing is reproducible.


def prepare_words_list(wanted_words):
	"""Return the full label list: silence and unknown, then the wanted words."""
	words = [SILENCE_LABEL, UNKNOWN_WORD_LABEL]
	words.extend(wanted_words)
	return words


def which_set(filename, validation_percentage, testing_percentage):
	"""Decide which partition a .wav file belongs to.

	The choice is a deterministic function of the base file name (with any
	'_nohash_' suffix stripped), so a given recording always lands in the
	same one of 'training', 'validation' or 'testing'.
	"""
	base_name = os.path.basename(filename)

	# Drop the '_nohash_' suffix so repeated utterances of one speaker/word
	# hash identically and stay together in the same partition.
	hash_name = re.sub(r'_nohash_.*$', '', base_name)
	hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
	# Map the hash onto a uniform value in [0, 100] and threshold it.
	bucket = int(hashed, 16) % (MAX_NUM_WAVS_PER_CLASS + 1)
	percentage_hash = bucket * (100.0 / MAX_NUM_WAVS_PER_CLASS)
	if percentage_hash < validation_percentage:
		return 'validation'
	if percentage_hash < testing_percentage + validation_percentage:
		return 'testing'
	return 'training'


def load_wav_file(filename):
	"""Read and decode a .wav file, returning its samples as a flat array."""
	with tf.Session(graph=tf.Graph()) as sess:
		filename_ph = tf.placeholder(tf.string, [])
		loader = io_ops.read_file(filename_ph)
		decoder = contrib_audio.decode_wav(loader, desired_channels=1)
		decoded = sess.run(decoder, feed_dict={filename_ph: filename})
	return decoded.audio.flatten()


def save_wav_file(filename, wav_data, sample_rate):
	"""Encode float samples as a .wav file and write it to disk."""
	with tf.Session(graph=tf.Graph()) as sess:
		filename_ph = tf.placeholder(tf.string, [])
		rate_ph = tf.placeholder(tf.int32, [])
		data_ph = tf.placeholder(tf.float32, [None, 1])
		encoder = contrib_audio.encode_wav(data_ph, rate_ph)
		saver = io_ops.write_file(filename_ph, encoder)
		feed = {
			filename_ph: filename,
			rate_ph: sample_rate,
			data_ph: np.reshape(wav_data, (-1, 1)),
		}
		sess.run(saver, feed_dict=feed)


class AudioProcessor(object):
	"""Handles loading, partitioning, and preparing audio training data.

	On construction this optionally downloads the dataset, splits the .wav
	files into training/validation/testing partitions, loads any background
	noise clips, and builds the TensorFlow graph used to turn raw audio into
	MFCC fingerprints.
	"""

	def __init__(self, data_url, data_dir, silence_percentage, unknown_percentage,
				wanted_words, validation_percentage, testing_percentage,
				model_settings):
		self.data_dir = data_dir
		self.maybe_download_and_extract_dataset(data_url, data_dir)
		self.prepare_data_index(silence_percentage, unknown_percentage,
						wanted_words, validation_percentage,
						testing_percentage)
		self.prepare_background_data()
		self.prepare_processing_graph(model_settings)

	def maybe_download_and_extract_dataset(self, data_url, dest_directory):
		"""Downloads and extracts the dataset archive if it isn't present.

		If data_url is empty, no download is attempted and dest_directory is
		assumed to already contain the data (the archive, if present, is still
		re-extracted on every call).

		Args:
			data_url: Web location of the tar.gz archive, or '' to skip.
			dest_directory: Local folder the archive is stored and extracted in.
		"""
		if not data_url:
			return
		if not os.path.exists(dest_directory):
			os.makedirs(dest_directory)
		filename = data_url.split('/')[-1]
		filepath = os.path.join(dest_directory, filename)
		if not os.path.exists(filepath):

			def _progress(count, block_size, total_size):
				# Simple carriage-return progress indicator for the download.
				sys.stdout.write(
					'\r>> Downloading %s %.1f%%' %
					(filename, float(count * block_size) / float(total_size) * 100.0))
				sys.stdout.flush()

			try:
				filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)
			# Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
			# aren't logged as download failures; still re-raised either way.
			except Exception:
				tf.logging.error('Failed to download URL: %s to folder: %s', data_url,
								filepath)
				tf.logging.error('Please make sure you have enough free space and'
								' an internet connection')
				raise
			print()
			statinfo = os.stat(filepath)
			tf.logging.info('Successfully downloaded %s (%d bytes)', filename,
						statinfo.st_size)
		tarfile.open(filepath, 'r:gz').extractall(dest_directory)

	def prepare_data_index(self, silence_percentage, unknown_percentage,
					wanted_words, validation_percentage,
					testing_percentage):
		"""Prepares a list of the samples organized by set and label.

		Walks every .wav file under data_dir, assigns it to a partition via
		which_set(), and records it either under its wanted-word label or in
		the pooled 'unknown' set. Silence and unknown entries are then mixed
		into each partition at the requested percentages and everything is
		shuffled deterministically (seeded by RANDOM_SEED).

		Args:
			silence_percentage: How much of each partition should be silence.
			unknown_percentage: How much of each partition should be unknown words.
			wanted_words: Labels the model should learn to recognize.
			validation_percentage: Share of the data used for validation.
			testing_percentage: Share of the data used for testing.

		Raises:
			Exception: If no .wav files are found, or a wanted word has no files.
		"""
		random.seed(RANDOM_SEED)
		wanted_words_index = {}
		for index, wanted_word in enumerate(wanted_words):
			# Indices 0 and 1 are reserved for silence and unknown.
			wanted_words_index[wanted_word] = index + 2
		self.data_index = {'validation': [], 'testing': [], 'training': []}
		unknown_index = {'validation': [], 'testing': [], 'training': []}
		all_words = {}
		# Walk every .wav under data_dir; each file's parent folder is its label.
		search_path = os.path.join(self.data_dir, '*', '*.wav')
		for wav_path in gfile.Glob(search_path):
			_, word = os.path.split(os.path.dirname(wav_path))
			word = word.lower()
			# The background-noise folder holds noise clips, not word samples.
			if word == BACKGROUND_NOISE_DIR_NAME:
				continue
			all_words[word] = True
			set_index = which_set(wav_path, validation_percentage, testing_percentage)
			# Wanted words go straight into the data index; everything else is
			# pooled so a subset can later be sampled as the 'unknown' class.
			if word in wanted_words_index:
				self.data_index[set_index].append({'label': word, 'file': wav_path})
			else:
				unknown_index[set_index].append({'label': word, 'file': wav_path})
		if not all_words:
			raise Exception('No .wavs found at ' + search_path)
		for index, wanted_word in enumerate(wanted_words):
			if wanted_word not in all_words:
				raise Exception('Expected to find ' + wanted_word +
						' in labels but only found ' +
						', '.join(all_words.keys()))

		# Silence entries reuse an arbitrary real file; it is muted at load
		# time via the foreground volume placeholder (see get_data()).
		silence_wav_path = self.data_index['training'][0]['file']
		for set_index in ['validation', 'testing', 'training']:
			set_size = len(self.data_index[set_index])
			silence_size = int(math.ceil(set_size * silence_percentage / 100))
			for _ in range(silence_size):
				self.data_index[set_index].append({
					'label': SILENCE_LABEL,
					'file': silence_wav_path
				})
			random.shuffle(unknown_index[set_index])
			unknown_size = int(math.ceil(set_size * unknown_percentage / 100))
			self.data_index[set_index].extend(unknown_index[set_index][:unknown_size])
		for set_index in ['validation', 'testing', 'training']:
			random.shuffle(self.data_index[set_index])

		# Build the ordered label list and the word -> class-index lookup.
		self.words_list = prepare_words_list(wanted_words)
		self.word_to_index = {}
		for word in all_words:
			if word in wanted_words_index:
				self.word_to_index[word] = wanted_words_index[word]
			else:
				self.word_to_index[word] = UNKNOWN_WORD_INDEX
		self.word_to_index[SILENCE_LABEL] = SILENCE_INDEX

	def prepare_background_data(self):
		"""Loads background noise .wav clips into self.background_data.

		Returns early (leaving the list empty) if the noise folder doesn't
		exist, since background mixing is optional.

		Raises:
			Exception: If the folder exists but contains no .wav files.
		"""
		self.background_data = []
		background_dir = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME)
		if not os.path.exists(background_dir):
			return self.background_data
		with tf.Session(graph=tf.Graph()) as sess:
			wav_filename_placeholder = tf.placeholder(tf.string, [])
			wav_loader = io_ops.read_file(wav_filename_placeholder)
			wav_decoder = contrib_audio.decode_wav(wav_loader, desired_channels=1)
			search_path = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME,
							'*.wav')
			for wav_path in gfile.Glob(search_path):
				wav_data = sess.run(
					wav_decoder,
					feed_dict={wav_filename_placeholder: wav_path}).audio.flatten()
				self.background_data.append(wav_data)
			if not self.background_data:
				raise Exception('No background wav files were found in ' + search_path)

	def prepare_processing_graph(self, model_settings):
		"""Builds the TF graph that turns a .wav file into an MFCC fingerprint.

		The graph: load and decode the file, scale it by a foreground volume,
		pad/slice it to apply a time shift, mix in scaled background noise,
		clamp to [-1, 1], then compute a spectrogram and its MFCC. The
		placeholders created here are fed by get_data().

		Args:
			model_settings: Dict with 'desired_samples', 'window_size_samples',
				'window_stride_samples' and 'dct_coefficient_count'.
		"""
		desired_samples = model_settings['desired_samples']
		self.wav_filename_placeholder_ = tf.placeholder(tf.string, [])
		wav_loader = io_ops.read_file(self.wav_filename_placeholder_)
		wav_decoder = contrib_audio.decode_wav(
			wav_loader, desired_channels=1, desired_samples=desired_samples)
		# Foreground volume of 0 turns any file into a silence sample.
		self.foreground_volume_placeholder_ = tf.placeholder(tf.float32, [])
		scaled_foreground = tf.multiply(wav_decoder.audio,
							self.foreground_volume_placeholder_)
		# Shift the audio in time by padding one end and slicing the other.
		self.time_shift_padding_placeholder_ = tf.placeholder(tf.int32, [2, 2])
		self.time_shift_offset_placeholder_ = tf.placeholder(tf.int32, [2])
		padded_foreground = tf.pad(
				scaled_foreground,
				self.time_shift_padding_placeholder_,
				mode='CONSTANT')
		sliced_foreground = tf.slice(padded_foreground,
							self.time_shift_offset_placeholder_,
							[desired_samples, -1])
		# Mix in background noise, scaled by its own volume placeholder.
		self.background_data_placeholder_ = tf.placeholder(tf.float32,
									[desired_samples, 1])
		self.background_volume_placeholder_ = tf.placeholder(tf.float32, [])
		background_mul = tf.multiply(self.background_data_placeholder_,
							self.background_volume_placeholder_)
		background_add = tf.add(background_mul, sliced_foreground)
		background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)
		# Extract the MFCC fingerprint used as the model input.
		spectrogram = contrib_audio.audio_spectrogram(
				background_clamp,
				window_size=model_settings['window_size_samples'],
				stride=model_settings['window_stride_samples'],
				magnitude_squared=True)
		self.mfcc_ = contrib_audio.mfcc(
				spectrogram,
				wav_decoder.sample_rate,
				dct_coefficient_count=model_settings['dct_coefficient_count'])

	def set_size(self, mode):
		"""Returns the number of samples in the given partition.

		Args:
			mode: One of 'training', 'validation' or 'testing'.
		"""
		return len(self.data_index[mode])

	def get_data(self, how_many, offset, model_settings, background_frequency,
		           background_volume_range, time_shift, mode, sess):
		"""Returns a batch of processed fingerprints and one-hot labels.

		Behaves like MNIST's next_batch: training batches are sampled randomly
		(with augmentation), while validation/testing batches are taken
		deterministically starting at `offset` so evaluation covers every
		sample exactly once.

		Args:
			how_many: Number of samples to return, or -1 for the whole partition.
			offset: Start index when picking deterministically.
			model_settings: Dict of model configuration values.
			background_frequency: Fraction of samples that get background noise.
			background_volume_range: Upper bound for the mixed-in noise volume.
			time_shift: Maximum random time shift, in samples.
			mode: One of 'training', 'validation' or 'testing'.
			sess: TensorFlow session holding the processing graph.

		Returns:
			Tuple of (fingerprint data, one-hot labels), one row per sample.
		"""
		candidates = self.data_index[mode]
		if how_many == -1:
			sample_count = len(candidates)
		else:
			sample_count = max(0, min(how_many, len(candidates) - offset))
		data = np.zeros((sample_count, model_settings['fingerprint_size']))
		labels = np.zeros((sample_count, model_settings['label_count']))
		desired_samples = model_settings['desired_samples']
		use_background = self.background_data and (mode == 'training')
		pick_deterministically = (mode != 'training')
		for i in xrange(offset, offset + sample_count):
			if how_many == -1 or pick_deterministically:
				sample_index = i
			else:
				sample_index = np.random.randint(len(candidates))
			sample = candidates[sample_index]
			# NOTE: randint's upper bound is exclusive, so the shift never
			# reaches +time_shift; kept as-is to preserve existing behavior.
			if time_shift > 0:
				time_shift_amount = np.random.randint(-time_shift, time_shift)
			else:
				time_shift_amount = 0
			if time_shift_amount > 0:
				time_shift_padding = [[time_shift_amount, 0], [0, 0]]
				time_shift_offset = [0, 0]
			else:
				time_shift_padding = [[0, -time_shift_amount], [0, 0]]
				time_shift_offset = [-time_shift_amount, 0]
			input_dict = {
				self.wav_filename_placeholder_: sample['file'],
				self.time_shift_padding_placeholder_: time_shift_padding,
				self.time_shift_offset_placeholder_: time_shift_offset,
			}
			# Optionally mix in a randomly chosen, randomly scaled noise clip.
			if use_background:
				background_index = np.random.randint(len(self.background_data))
				background_samples = self.background_data[background_index]
				# Bug fix: a noise clip exactly desired_samples long used to
				# hit np.random.randint(0, 0), which raises ValueError; use
				# offset 0 in that case. (A clip shorter than desired_samples
				# still fails at the reshape below.)
				max_offset = len(background_samples) - desired_samples
				if max_offset > 0:
					background_offset = np.random.randint(0, max_offset)
				else:
					background_offset = 0
				background_clipped = background_samples[background_offset:(
					background_offset + desired_samples)]
				background_reshaped = background_clipped.reshape([desired_samples, 1])
				if np.random.uniform(0, 1) < background_frequency:
					background_volume = np.random.uniform(0, background_volume_range)
				else:
					background_volume = 0
			else:
				background_reshaped = np.zeros([desired_samples, 1])
				background_volume = 0
			input_dict[self.background_data_placeholder_] = background_reshaped
			input_dict[self.background_volume_placeholder_] = background_volume
			# Silence samples are produced by muting the foreground entirely.
			if sample['label'] == SILENCE_LABEL:
				input_dict[self.foreground_volume_placeholder_] = 0
			else:
				input_dict[self.foreground_volume_placeholder_] = 1
			data[i - offset, :] = sess.run(self.mfcc_, feed_dict=input_dict).flatten()
			label_index = self.word_to_index[sample['label']]
			labels[i - offset, label_index] = 1
		return data, labels

	def get_wav_files(self, how_many, offset, model_settings, mode):
		"""Returns a batch of .wav file paths and their one-hot labels.

		Silence entries are reported as the placeholder name 'silence.wav'
		rather than the real (muted) file they reuse.

		Args:
			how_many: Number of samples to return, or -1 for the whole partition.
			offset: Start index when picking deterministically.
			model_settings: Dict of model configuration values.
			mode: One of 'training', 'validation' or 'testing'.

		Returns:
			Tuple of (list of file paths, one-hot labels).
		"""
		candidates = self.data_index[mode]
		if how_many == -1:
			sample_count = len(candidates)
		else:
			sample_count = max(0, min(how_many, len(candidates) - offset))
		pick_deterministically = (mode != 'training')
		wav_files = []
		labels = np.zeros((sample_count, model_settings['label_count']))
		for i in xrange(offset, offset + sample_count):
			if how_many == -1 or pick_deterministically:
				sample_index = i
			else:
				sample_index = np.random.randint(len(candidates))
			sample = candidates[sample_index]
			if sample['label'] == SILENCE_LABEL:
				wav_files.append('silence.wav')
			else:
				wav_files.append(sample['file'])
			label_index = self.word_to_index[sample['label']]
			labels[i - offset, label_index] = 1
		return wav_files, labels

	def get_unprocessed_data(self, how_many, model_settings, mode):
		"""Returns raw (un-augmented) audio samples and their text labels.

		Unlike get_data() this skips time shifting, background mixing and the
		MFCC transform: each row is just the decoded waveform, with silence
		samples zeroed out via the foreground volume.

		Args:
			how_many: Number of samples to return, or -1 for the whole partition.
			model_settings: Dict with 'desired_samples'.
			mode: One of 'training', 'validation' or 'testing'.

		Returns:
			Tuple of (sample data, list of label strings).
		"""
		candidates = self.data_index[mode]
		if how_many == -1:
			sample_count = len(candidates)
		else:
			sample_count = how_many
		desired_samples = model_settings['desired_samples']
		words_list = self.words_list
		data = np.zeros((sample_count, desired_samples))
		labels = []
		with tf.Session(graph=tf.Graph()) as sess:
			wav_filename_placeholder = tf.placeholder(tf.string, [])
			wav_loader = io_ops.read_file(wav_filename_placeholder)
			wav_decoder = contrib_audio.decode_wav(
					wav_loader, desired_channels=1, desired_samples=desired_samples)
			foreground_volume_placeholder = tf.placeholder(tf.float32, [])
			scaled_foreground = tf.multiply(wav_decoder.audio,
								foreground_volume_placeholder)
			for i in range(sample_count):
				if how_many == -1:
					sample_index = i
				else:
					sample_index = np.random.randint(len(candidates))
				sample = candidates[sample_index]
				input_dict = {wav_filename_placeholder: sample['file']}
				if sample['label'] == SILENCE_LABEL:
					input_dict[foreground_volume_placeholder] = 0
				else:
					input_dict[foreground_volume_placeholder] = 1
				data[i, :] = sess.run(scaled_foreground, feed_dict=input_dict).flatten()
				label_index = self.word_to_index[sample['label']]
				labels.append(words_list[label_index])
		return data, labels
