import numpy as np
import soundfile as sf
from scipy.optimize import minimize


# Detect abnormal (high-energy) sound segments in an audio stream.
def detect_abnormal_segments(audio_data, threshold, window_size):
    """Slide a non-overlapping window over *audio_data* and return the
    (start, end) index pairs of every window whose average energy exceeds
    *threshold*.
    """
    flagged = []  # (start, end) index pairs of high-energy windows
    total_samples = len(audio_data)

    for begin in range(0, total_samples, window_size):
        # Clamp the final window to the end of the signal.
        finish = min(begin + window_size, total_samples)
        window = audio_data[begin:finish]

        if calculate_energy(window) > threshold:
            flagged.append((begin, finish))

    return flagged

# Step 1: energy of an abnormal sound segment
def calculate_energy(audio_segment):
    """Return the average power (mean squared amplitude) of *audio_segment*."""
    squared_amplitudes = np.square(audio_segment)
    return np.sum(squared_amplitudes) / len(audio_segment)

def merge_abnormal_segments(abnormal_segments, merge_threshold):
    """Fuse neighbouring segments whose gap is at most *merge_threshold*.

    *abnormal_segments* is a list of (start, end) index pairs in ascending
    order; adjacent pairs separated by no more than *merge_threshold*
    samples are merged into one span. Returns a new list.
    """
    if not abnormal_segments:
        return []

    merged = [abnormal_segments[0]]
    for segment in abnormal_segments[1:]:
        previous_start, previous_end = merged[-1]
        if segment[0] - previous_end <= merge_threshold:
            # Gap is small enough: extend the previous segment in place.
            merged[-1] = (previous_start, segment[1])
        else:
            merged.append(segment)

    return merged


# Compute the spectrum of a signal (placeholder — adapt to the real analysis).
def calculate_spectrum(audio_signal):
    """Return the complex frequency spectrum of *audio_signal* via the FFT."""
    return np.fft.fft(audio_signal)


# Step 2: obtain time-delay information
def calculate_time_delays(sensor_positions, source_position, speed_of_sound):
    """Propagation delay (seconds) from *source_position* to each sensor.

    *sensor_positions* is an (N, 3) array; each sensor's Euclidean distance
    from the source is divided by the speed of sound.
    """
    sensor_distances = np.linalg.norm(sensor_positions - source_position, axis=1)
    return sensor_distances / speed_of_sound

# Step 3: beamforming
def apply_beamforming(audio_data, sensor_positions, time_delays, source_angle,
                      sample_rate=44100):
    """Delay-and-sum style beamforming of *audio_data* toward *source_angle*.

    For each sample, per-sensor phase shifts derived from *time_delays* are
    combined with angle-dependent weights, and the real part of the weighted
    sum becomes the output sample.

    BUG FIX: the original built ``beamformed_audio`` but never returned it
    (the function returned None, crashing its callers), and implicitly cast
    a complex sum into a float array (ComplexWarning). The sample rate is
    now a parameter with the original hard-coded value as its default.

    NOTE(review): ``audio_data[i]`` is treated as a scalar sample broadcast
    across all sensors — confirm whether per-sensor channels were intended.
    """
    num_sensors = len(sensor_positions)
    num_samples = len(audio_data)
    beamformed_audio = np.zeros(num_samples)

    # The angle-dependent weights do not vary per sample: hoist them.
    weights = np.exp(1j * 2 * np.pi * source_angle * np.arange(num_sensors))

    for i in range(num_samples):
        # Phase-shift each sensor according to its propagation delay.
        time_shifts = np.exp(-1j * 2 * np.pi * time_delays * (i / sample_rate))
        sensor_signals = audio_data[i] * time_shifts
        # Take the real part explicitly (the original's float assignment
        # discarded the imaginary part implicitly).
        beamformed_audio[i] = np.real(np.sum(sensor_signals * weights))

    return beamformed_audio




# Step 4: spherical interpolation localization
def spherical_interpolation(sensor_positions, predicted_energy, actual_energy):
    """Estimate the 3-D source position by least-squares fitting a
    ``gain * distance**2`` energy model against the measured energies.

    Returns the optimized (x, y, z) source coordinates.

    NOTE(review): the `predicted_energy` parameter is shadowed inside
    `residual_function` and therefore never used — confirm intent.
    """
    def residual_function(x, *args):
        # x = [sx, sy, sz, gain]. The `args` forwarded by `minimize` below
        # are swallowed and ignored; the closed-over variables are used.
        source_position = x[:3]
        distance_to_source = np.linalg.norm(sensor_positions - source_position, axis=1)
        # Modelled per-sensor energy: gain scaled by squared distance.
        # NOTE(review): this assignment shadows the outer `predicted_energy`.
        predicted_energy = x[3] * distance_to_source**2
        # NOTE(review): the caller builds `actual_energy` with one entry per
        # candidate angle, while `predicted_energy` here has one entry per
        # sensor — confirm these two arrays broadcast as intended.
        residuals = np.log(predicted_energy / actual_energy)
        return np.sum(residuals ** 2)

    num_sensors = len(sensor_positions)  # NOTE(review): currently unused
    source_position = np.array([0, 0, 0])  # assumed initial source position
    initial_guess = np.concatenate((source_position, [1]))  # initial guess: position + gain

    result = minimize(residual_function, initial_guess, args=(sensor_positions, predicted_energy, actual_energy))
    estimated_source = result.x[:3]
    return estimated_source

# Follow-up step: decide whether the estimated source lies inside the device area
def is_within_device_area(estimated_source):
    """Return True iff *estimated_source* (x, y, z) falls inside the
    device's axis-aligned bounding box (boundaries inclusive)."""
    # The device region is modelled as a box: (min, max) bounds per axis.
    bounds = ((0, 10), (0, 10), (0, 5))
    return all(low <= coordinate <= high
               for coordinate, (low, high) in zip(estimated_source, bounds))

# Load sensor position information from disk.
def load_sensor_positions(filename):
    """Parse *filename* — one whitespace-separated ``x y z`` triple per
    line — and return the positions as an (N, 3) float array.

    Raises ValueError if a line does not hold exactly three numbers.
    """
    positions = []
    with open(filename, "r") as source_file:
        for line in source_file:
            x, y, z = (float(token) for token in line.split())
            positions.append([x, y, z])
    return np.array(positions)



# Compute the predicted energy of the beamformed signal.
def calculate_predicted_energy(beamformed_audio):
    """Predicted energy: the sum of the spectral magnitudes of the
    beamformed signal (frequency-domain analysis via calculate_spectrum)."""
    magnitudes = np.abs(calculate_spectrum(beamformed_audio))
    return np.sum(magnitudes)

# Compute the measured ("actual") energy for each candidate direction.
def calculate_actual_energy(beamformed_audio, source_angles, sensor_positions, time_delays):
    """For every angle in *source_angles*, re-beamform the signal toward
    that angle and record the sum of its spectral magnitudes.

    Returns a float array with one energy value per candidate angle.
    """
    actual_energy = np.zeros(len(source_angles))
    for index, candidate_angle in enumerate(source_angles):
        steered_audio = apply_beamforming(beamformed_audio, sensor_positions,
                                          time_delays, candidate_angle)
        actual_energy[index] = np.sum(np.abs(calculate_spectrum(steered_audio)))
    return actual_energy

# Main program
def main():
    """Pipeline: detect abnormal segments in each sensor recording,
    beamform, localize the source, and average the accepted estimates."""
    # Audio recorded by three sensors.
    audio_files = ["sensor1.wav", "sensor2.wav", "sensor3.wav"]

    sensor_positions = load_sensor_positions("sensor_positions.txt")
    source_positions = []  # accepted source estimates, accumulated across files
    speed_of_sound = 343  # speed of sound, m/s

    for audio_file in audio_files:
        audio_data, sample_rate = sf.read(audio_file)

        # Step 1: abnormal-segment detection.
        threshold = 0.1  # energy threshold
        window_size = 100  # window size in samples
        abnormal_segments = detect_abnormal_segments(audio_data, threshold, window_size)

        # Merge segments separated by short gaps and report the result.
        merge_threshold = 100  # merge-gap threshold, in samples
        merged_abnormal_segments = merge_abnormal_segments(abnormal_segments, merge_threshold)
        for start_idx, end_idx in merged_abnormal_segments:
            print(f"Merged abnormal segment: Start {start_idx}, End {end_idx}")

        # Report the energy of each (unmerged) abnormal segment.
        for start_idx, end_idx in abnormal_segments:
            audio_segment = audio_data[start_idx:end_idx]
            energy = calculate_energy(audio_segment)
            print(f"Abnormal segment energy: {energy}")

        # Step 2: time-delay information.
        # BUG FIX: the original read source_positions[-1], which raises
        # IndexError on the first file because the list starts empty. Use
        # the latest accepted estimate when one exists, otherwise fall back
        # to an initial guess at the origin.
        if source_positions:
            reference_source = source_positions[-1]
        else:
            reference_source = np.zeros(3)
        time_delays = calculate_time_delays(sensor_positions, reference_source, speed_of_sound)

        # Step 3: beamforming.
        source_angle = 0  # assumed initial source angle
        beamformed_audio = apply_beamforming(audio_data, sensor_positions, time_delays, source_angle)

        # Step 4: spherical-interpolation localization.
        source_angles = np.radians(np.arange(0, 360, 1))  # candidate direction angles
        predicted_energy = calculate_predicted_energy(beamformed_audio)
        actual_energy = calculate_actual_energy(beamformed_audio, source_angles, sensor_positions, time_delays)
        estimated_source = spherical_interpolation(sensor_positions, predicted_energy, actual_energy)

        # Follow-up: keep the estimate only if it lies inside the device
        # region, then report the running average of accepted estimates.
        if is_within_device_area(estimated_source):
            source_positions.append(estimated_source)

        if source_positions:
            average_source_position = np.mean(source_positions, axis=0)
            print("平均声源位置:", average_source_position)
        else:
            print("未检测到异常声源。")

# Run the full pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()



