import numpy as np
import librosa
import soundfile as sf
from scipy import signal
import matplotlib.pyplot as plt


class Audio3DProcessor:
	"""Spatialize stereo audio with a simplified HRTF model.

	Provides interaural time/intensity differences (ITD/IID), distance
	attenuation, a one-tap reverb, and moving-source simulation, then
	writes the result to a file.
	"""

	def __init__(self, sample_rate=44100):
		# Sample rate used for loading, delay computation, and output.
		self.sample_rate = sample_rate

	def load_audio(self, file_path):
		"""Load an audio file as a (2, n_samples) float array.

		Mono input is duplicated into both channels so downstream code
		can always assume stereo. Returns (audio, sample_rate).
		"""
		audio, sr = librosa.load(file_path, sr=self.sample_rate,
								 mono=False)
		if audio.ndim == 1:
			audio = np.vstack([audio, audio])
		return audio, sr

	def create_simple_hrtf(self, angle_degrees, distance=1.0):
		"""Compute per-ear delay (samples) and gain for a source position.

		angle_degrees: azimuth of the source (0 = front, 90 = right,
			-90 = left).
		distance: source distance in metres; attenuates both ears.

		Returns a dict with 'left_delay'/'right_delay' (integer sample
		counts) and 'left_gain'/'right_gain' (linear amplitudes).
		"""
		angle_rad = np.radians(angle_degrees)

		# Woodworth approximation of the interaural time difference,
		# using an average head radius and the speed of sound.
		head_radius = 0.0875  # average head radius, metres
		speed_of_sound = 343.0  # m/s
		itd = (head_radius / speed_of_sound) * (angle_rad + np.sin(angle_rad))
		itd_samples = int(np.abs(itd) * self.sample_rate)

		# Simplified cosine model of the interaural intensity difference.
		iid = np.cos(angle_rad) * 0.5 + 0.5

		# Positive angles place the source to the right: the left (far)
		# ear hears later and quieter, and symmetrically for the right.
		left_delay = itd_samples if angle_degrees >= 0 else 0
		left_gain = 1.0 if angle_degrees <= 0 else 1.0 - iid * 0.3
		right_delay = itd_samples if angle_degrees <= 0 else 0
		right_gain = 1.0 if angle_degrees >= 0 else 1.0 - iid * 0.3

		# Inverse-square-root distance attenuation applied to both ears.
		distance_attenuation = 1.0 / (distance ** 0.5)
		left_gain *= distance_attenuation
		right_gain *= distance_attenuation

		return {
			'left_delay': left_delay,
			'right_delay': right_delay,
			'left_gain': left_gain,
			'right_gain': right_gain
		}

	def apply_hrtf(self, audio, angle_degrees, distance=1.0):
		"""Apply the simplified HRTF to a (2, n) stereo buffer.

		Returns a new (2, n) array; the input is not modified.
		"""
		params = self.create_simple_hrtf(angle_degrees, distance)

		# Delay each ear with np.roll, then zero the samples that roll
		# wraps from the end of the buffer to the front (bug fix: the
		# wrapped tail used to leak into the start of the signal;
		# create_reverb already handled this correctly).
		left = np.roll(audio[0], params['left_delay'])
		if params['left_delay'] > 0:
			left[:params['left_delay']] = 0
		right = np.roll(audio[1], params['right_delay'])
		if params['right_delay'] > 0:
			right[:params['right_delay']] = 0

		# np.roll preserves length, so no re-padding is needed here
		# (the previous max_len/pad logic was dead code).
		return np.array([left * params['left_gain'],
						 right * params['right_gain']])

	def create_reverb(self, audio, decay=0.5, delay_ms=30):
		"""Mix in a single delayed echo as a very simple reverb.

		decay: echo amplitude relative to the dry signal.
		delay_ms: echo delay in milliseconds.
		"""
		delay_samples = int(delay_ms * self.sample_rate / 1000)

		wet_signal = np.zeros_like(audio)
		for channel in range(audio.shape[0]):
			delayed = np.roll(audio[channel], delay_samples)
			delayed[:delay_samples] = 0  # drop samples wrapped by roll
			wet_signal[channel] = delayed * decay

		# Dry + wet mix.
		return audio + wet_signal

	def simulate_3d_movement(self, audio, movement_pattern='circle',
							 frame_size=2048):
		"""Simulate a sound source moving around the listener.

		movement_pattern: 'circle' (0° -> 360°), 'left_to_right'
			(-90° -> 90°) or 'front_to_back' (distance 0.5 m -> 2.5 m).
			Unknown patterns return silence, matching prior behavior.
		frame_size: samples per processing frame.

		The audio is processed in frames rather than sample by sample:
		the previous per-sample Python loop was orders of magnitude
		slower and silently disabled the ITD (np.roll on a one-sample
		frame is a no-op), reducing the effect to pure gain panning.
		"""
		n_samples = audio.shape[1]
		output = np.zeros_like(audio)

		for start in range(0, n_samples, frame_size):
			end = min(start + frame_size, n_samples)
			progress = start / n_samples

			if movement_pattern == 'circle':
				angle, distance = progress * 360, 1.0
			elif movement_pattern == 'left_to_right':
				angle, distance = -90 + progress * 180, 1.0
			elif movement_pattern == 'front_to_back':
				angle, distance = 0, 0.5 + progress * 2.0
			else:
				continue  # unknown pattern: leave output silent

			frame = audio[:, start:end]
			output[:, start:end] = self.apply_hrtf(frame, angle, distance)

		return output

	def process_audio_3d(self, input_file, output_file,
						 movement_pattern='circle', add_reverb=True):
		"""Load a file, apply the 3D movement effect, and save the result.

		Returns (processed_audio, sample_rate).
		"""
		print(f"处理音频文件: {input_file}")

		audio, sr = self.load_audio(input_file)
		print(f"音频信息: {audio.shape} 采样率: {sr}")

		print("应用3D环绕效果...")
		processed_audio = self.simulate_3d_movement(audio,
													movement_pattern)

		if add_reverb:
			print("添加混响效果...")
			processed_audio = self.create_reverb(processed_audio)

		# Peak-normalize to 90% of full scale to avoid clipping.
		max_val = np.max(np.abs(processed_audio))
		if max_val > 0:
			processed_audio = processed_audio / max_val * 0.9

		# soundfile expects (n_samples, n_channels), hence the transpose.
		sf.write(output_file, processed_audio.T, sr)
		print(f"3D音频已保存: {output_file}")

		return processed_audio, sr


# Usage example
def main():
	"""Demo entry point: spatialize a sample MP3 into a 3D WAV file."""
	input_file = "test_xingchahui.mp3"  # replace with your own MP3 path
	output_file = "three_dimension.wav"

	engine = Audio3DProcessor()
	try:
		# Other patterns: 'left_to_right', 'front_to_back'
		engine.process_audio_3d(
			input_file,
			output_file,
			movement_pattern='circle',
			add_reverb=True,
		)
		print("处理完成！")
	except Exception as e:
		# Top-level boundary for the demo: report and exit cleanly.
		print(f"处理过程中出错: {e}")


# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
	main()