#include <iostream>
#include <vector>
#include <cmath>
#include <portaudio.h>
#include <cstdint>
#include <complex>
#include <valarray>
#include <algorithm>
#include <cstdio>

// M_PI is not part of standard C++. Defining _USE_MATH_DEFINES here is too
// late — <cmath> was already included above, so on MSVC the macro would still
// be missing. Provide an explicit fallback definition instead.
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

// ---- Basic audio configuration ----
const int SAMPLE_RATE = 44100;            // samples per second
const int CHANNELS = 1;                   // mono capture
const int FRAMES_PER_BUFFER = 512;        // must be a power of two (FFT requirement)
const float SILENCE_THRESHOLD = 0.001f;   // RMS below this counts as silence
const float SILENCE_DURATION = 5.0f;      // stop after this many seconds of continuous silence
const int BITS_PER_SAMPLE = 16;
const int NOISE_SAMPLE_DURATION = 1;      // seconds of ambient noise sampled for calibration
const float NOISE_REDUCE_FACTOR = 0.8f;   // noise-reduction strength (0..1)

// Number of consecutive silent buffers equivalent to SILENCE_DURATION seconds.
const int SILENCE_FRAMES = static_cast<int>(SILENCE_DURATION * SAMPLE_RATE / FRAMES_PER_BUFFER);

// 全局数据结构
// Shared state passed to the PortAudio callback via the user_data pointer.
struct AudioData {
    std::vector<int16_t> buffer;          // accumulated (denoised) 16-bit PCM samples
    std::vector<float> noise_profile;     // per-FFT-bin average noise magnitude, filled by calibrate_noise
    int silence_counter = 0;              // consecutive buffers whose RMS fell below SILENCE_THRESHOLD
    bool is_recording = true;             // cleared once the silence limit is reached
    bool noise_calibrated = false;        // set after the noise profile has been built
};

// WAV文件头结构
// Canonical 44-byte RIFF/WAVE header for uncompressed 16-bit PCM.
// save_wav() writes this struct to disk verbatim with fwrite(), so the layout
// must contain no compiler-inserted padding — enforced by the static_assert.
struct WavHeader {
    char riff[4] = {'R', 'I', 'F', 'F'};
    uint32_t file_size;                   // RIFF chunk size = total file size - 8
    char wave[4] = {'W', 'A', 'V', 'E'};
    char fmt[4] = {'f', 'm', 't', ' '};
    uint32_t fmt_size = 16;               // fmt chunk is always 16 bytes for PCM
    uint16_t format = 1;                  // 1 = uncompressed PCM
    uint16_t channels = CHANNELS;
    uint32_t sample_rate = SAMPLE_RATE;
    uint32_t byte_rate = SAMPLE_RATE * CHANNELS * BITS_PER_SAMPLE / 8;
    uint16_t block_align = CHANNELS * BITS_PER_SAMPLE / 8;
    uint16_t bits_per_sample = BITS_PER_SAMPLE;
    char data[4] = {'d', 'a', 't', 'a'};
    uint32_t data_size;                   // raw PCM payload size in bytes
};
// All members are naturally aligned, so this should hold on every mainstream
// ABI; if a compiler ever pads the struct, fail the build instead of emitting
// corrupt WAV files.
static_assert(sizeof(WavHeader) == 44, "WavHeader must be exactly 44 bytes (no padding)");

// ===================== Noise-reduction core =====================
using Complex = std::complex<float>;
using CArray = std::valarray<Complex>;

// In-place radix-2 Cooley-Tukey FFT.
// x.size() must be a power of two (guaranteed here by FRAMES_PER_BUFFER);
// sizes <= 1 are returned unchanged.
void fft(CArray& x) {
    const size_t N = x.size();
    if (N <= 1) return;

    // Split into even- and odd-indexed halves and transform each recursively.
    CArray even = x[std::slice(0, N / 2, 2)];
    CArray odd  = x[std::slice(1, N / 2, 2)];
    fft(even);
    fft(odd);

    // Butterfly combine. A local constant is used instead of M_PI so this
    // function does not depend on the non-standard macro (missing on MSVC
    // unless _USE_MATH_DEFINES is defined before the first <cmath> include).
    constexpr float kPi = 3.14159265358979323846f;
    for (size_t k = 0; k < N / 2; ++k) {
        const float theta = -2.0f * kPi * static_cast<float>(k) / static_cast<float>(N);
        const Complex t = std::polar(1.0f, theta) * odd[k];
        x[k]         = even[k] + t;
        x[k + N / 2] = even[k] - t;
    }
}

// Inverse FFT via the conjugation identity: IFFT(x) = conj(FFT(conj(x))) / N.
void ifft(CArray& x) {
    // Captureless lambdas convert to the plain function pointer that
    // valarray::apply expects, sidestepping std::conj overload resolution.
    x = x.apply([](Complex v) { return std::conj(v); });
    fft(x);
    x = x.apply([](Complex v) { return std::conj(v); });
    // Normalize by the transform length.
    x /= Complex(static_cast<float>(x.size()), 0.0f);
}

// Build the average noise-magnitude spectrum from ambient-noise samples.
// The capture is cut into FRAMES_PER_BUFFER-sized frames, each frame is
// FFT'd, and the per-bin magnitudes are averaged into noise_profile.
// noise_profile is resized to FRAMES_PER_BUFFER; any trailing samples that
// do not fill a whole frame are ignored.
void calibrate_noise(const std::vector<int16_t>& noise_samples, std::vector<float>& noise_profile) {
    const int frame_size = FRAMES_PER_BUFFER;
    noise_profile.assign(frame_size, 0.0f);

    // Guard: fewer samples than one frame would make frame_count zero and the
    // averaging below would divide by zero. An all-zero profile simply
    // disables spectral subtraction, which is the safe fallback.
    const int frame_count = static_cast<int>(noise_samples.size()) / frame_size;
    if (frame_count == 0) {
        std::cout << "噪声校准完成！" << std::endl;
        return;
    }

    CArray frame(frame_size);
    for (int i = 0; i < frame_count; ++i) {
        // Convert int16 PCM to normalized floats directly into the FFT buffer.
        for (int j = 0; j < frame_size; ++j) {
            frame[j] = Complex(static_cast<float>(noise_samples[i * frame_size + j]) / 32768.0f, 0.0f);
        }

        fft(frame);

        // Accumulate per-bin magnitudes across frames.
        for (int j = 0; j < frame_size; ++j) {
            noise_profile[j] += std::abs(frame[j]);
        }
    }

    // Average the accumulated magnitudes.
    for (float& val : noise_profile) {
        val /= static_cast<float>(frame_count);
    }

    std::cout << "噪声校准完成！" << std::endl;
}

// Spectral-subtraction denoising of one buffer, in place.
// samples      - frame_count int16 PCM samples, overwritten with the result.
// frame_count  - must equal FRAMES_PER_BUFFER (power of two) or the frame is
//                left untouched; partial final buffers are skipped on purpose.
// noise_profile- per-bin noise magnitudes from calibrate_noise (empty = no-op).
// NOTE(review): frames are processed without windowing or overlap-add, so
// some block-boundary artifacts are expected — acceptable for this use case.
void denoise_audio_frame(int16_t* samples, int frame_count, const std::vector<float>& noise_profile) {
    if (noise_profile.empty() || frame_count != FRAMES_PER_BUFFER) {
        return;
    }

    // Load normalized samples into the FFT buffer.
    CArray frame(frame_count);
    for (int i = 0; i < frame_count; ++i) {
        frame[i] = Complex(static_cast<float>(samples[i]) / 32768.0f, 0.0f);
    }

    fft(frame);

    // Spectral subtraction: shrink each bin's magnitude by the calibrated
    // noise magnitude (scaled by NOISE_REDUCE_FACTOR), preserving phase.
    for (int i = 0; i < frame_count; ++i) {
        const float amp = std::abs(frame[i]);
        const float noise_amp = noise_profile[i] * NOISE_REDUCE_FACTOR;

        // "<=" (not "<") also zeroes the amp == noise_amp == 0 case, which the
        // original fell through to (amp - noise_amp) / amp and produced NaNs.
        if (amp <= noise_amp) {
            frame[i] = 0.0f;
        } else {
            frame[i] *= (amp - noise_amp) / amp;
        }
    }

    ifft(frame);

    // Convert back to 16-bit samples with a small noise gate and clamping.
    for (int i = 0; i < frame_count; ++i) {
        float denoised = frame[i].real();

        // Gate residual low-level noise below half the silence threshold.
        if (std::fabs(denoised) < SILENCE_THRESHOLD * 0.5f) {
            denoised = 0.0f;
        }

        denoised = std::max(-1.0f, std::min(denoised, 1.0f));

        int32_t temp = static_cast<int32_t>(denoised * 32768.0f);
        temp = std::max<int32_t>(-32768, std::min<int32_t>(temp, 32767));
        samples[i] = static_cast<int16_t>(temp);
    }
}

// ===================== Original features (adapted to 5-second silence) =====================
// Root-mean-square level of a frame of int16 samples, normalized to [0, 1].
// Returns 0 for a null/empty frame (the original divided by zero there).
float calculate_rms(const int16_t* samples, int frame_count) {
    if (samples == nullptr || frame_count <= 0) {
        return 0.0f;
    }
    float sum = 0.0f;
    for (int i = 0; i < frame_count; ++i) {
        const float sample = static_cast<float>(samples[i]) / 32768.0f;
        sum += sample * sample;
    }
    return std::sqrt(sum / static_cast<float>(frame_count));
}

// PortAudio capture callback. Two phases:
//   1) calibration — accumulate NOISE_SAMPLE_DURATION seconds of ambient
//      audio, then build the noise profile;
//   2) recording — denoise each buffer, track silence, and append to the
//      output buffer until SILENCE_FRAMES consecutive silent buffers.
// Runs on PortAudio's real-time audio thread; returns paContinue to keep the
// stream running or paComplete to stop it.
int audio_callback(const void* input_buffer, void* output_buffer,
                   unsigned long frame_count,
                   const PaStreamCallbackTimeInfo* time_info,
                   PaStreamCallbackFlags status_flags,
                   void* user_data) {
    AudioData* data = static_cast<AudioData*>(user_data);
    if (!data->is_recording) {
        return paComplete;
    }

    // PortAudio is allowed to deliver a null input buffer (e.g. on input
    // overflow); dereferencing it would be undefined behavior.
    if (input_buffer == nullptr) {
        return paContinue;
    }

    const int16_t* input_samples = static_cast<const int16_t*>(input_buffer);
    std::vector<int16_t> temp_samples(input_samples, input_samples + frame_count);

    if (!data->noise_calibrated) {
        // Accumulates across callbacks; cleared once calibration completes.
        static std::vector<int16_t> noise_samples;
        noise_samples.insert(noise_samples.end(), temp_samples.begin(), temp_samples.end());

        int target_samples = SAMPLE_RATE * NOISE_SAMPLE_DURATION;
        std::cout << "\r正在采集环境噪声... " << noise_samples.size() << "/" << target_samples << std::flush;

        if (static_cast<int>(noise_samples.size()) >= target_samples) {
            calibrate_noise(noise_samples, data->noise_profile);
            data->noise_calibrated = true;
            noise_samples.clear();
            std::cout << "\n开始录制音频（已开启降噪，连续" << SILENCE_DURATION << "秒无声音后停止）..." << std::endl;
        }
        return paContinue;
    }

    denoise_audio_frame(temp_samples.data(), static_cast<int>(frame_count), data->noise_profile);

    // Silence detection on the denoised signal.
    float rms = calculate_rms(temp_samples.data(), static_cast<int>(frame_count));
    if (rms < SILENCE_THRESHOLD) {
        data->silence_counter++;
        // Show how far into the silence window we are.
        float current_silence = static_cast<float>(data->silence_counter * FRAMES_PER_BUFFER) / SAMPLE_RATE;
        std::cout << "\r当前静音时长: " << current_silence << "秒 / " << SILENCE_DURATION << "秒" << std::flush;

        if (data->silence_counter >= SILENCE_FRAMES) {
            data->is_recording = false;
            std::cout << "\n检测到连续" << SILENCE_DURATION << "秒无声音，停止录制！" << std::endl;
            return paComplete;
        }
    } else {
        data->silence_counter = 0; // any sound resets the silence window
    }

    data->buffer.insert(data->buffer.end(), temp_samples.begin(), temp_samples.end());
    return paContinue;
}

// Write the recorded samples to a 16-bit PCM mono WAV file.
// Returns false (after reporting) on open or write failure so callers can
// react — the original ignored fwrite/fclose results and could silently
// leave a truncated file on a full disk.
bool save_wav(const std::string& filename, const std::vector<int16_t>& data) {
    FILE* file = fopen(filename.c_str(), "wb");
    if (!file) {
        std::cerr << "无法创建WAV文件: " << filename << std::endl;
        return false;
    }

    WavHeader header;
    header.data_size = static_cast<uint32_t>(data.size() * sizeof(int16_t));
    header.file_size = 36 + header.data_size;  // RIFF chunk size = total - 8

    bool ok = fwrite(&header, sizeof(WavHeader), 1, file) == 1;
    // Skip the payload write entirely when empty: data.data() may be null.
    ok = ok && (data.empty() || fwrite(data.data(), sizeof(int16_t), data.size(), file) == data.size());
    // fclose flushes buffered data, so its result matters too.
    ok = (fclose(file) == 0) && ok;

    if (!ok) {
        std::cerr << "写入WAV文件失败: " << filename << std::endl;
        return false;
    }

    // file_size holds the RIFF chunk size (total - 8); report the real total.
    std::cout << "降噪后的音频已保存为: " << filename
              << " (大小: " << (header.file_size + 8) << " 字节)" << std::endl;
    return true;
}
// NOTE(review): this include belongs in the include block at the top of the
// file, not immediately before main().
#include "ros/ros.h"

// Entry point: initialize ROS and PortAudio, open the default input device,
// record (with noise calibration + denoising) until SILENCE_DURATION seconds
// of continuous silence, then save the result as a WAV file.
int main(int argc, char* argv[]) {
    ros::init(argc, argv, "tts_node");

    PaError err = Pa_Initialize();
    if (err != paNoError) {
        std::cerr << "PortAudio初始化失败: " << Pa_GetErrorText(err) << std::endl;
        return -1;
    }

    PaStreamParameters input_params;
    input_params.device = Pa_GetDefaultInputDevice();
    if (input_params.device == paNoDevice) {
        std::cerr << "未找到音频输入设备！" << std::endl;
        Pa_Terminate();
        return -1;
    }

    input_params.channelCount = CHANNELS;
    input_params.sampleFormat = paInt16;
    input_params.suggestedLatency = Pa_GetDeviceInfo(input_params.device)->defaultLowInputLatency;
    input_params.hostApiSpecificStreamInfo = nullptr;

    AudioData audio_data;
    PaStream* stream = nullptr;

    err = Pa_OpenStream(&stream,
                        &input_params,
                        nullptr,            // capture only, no output stream
                        SAMPLE_RATE,
                        FRAMES_PER_BUFFER,
                        paClipOff,
                        audio_callback,
                        &audio_data);
    if (err != paNoError) {
        std::cerr << "打开音频流失败: " << Pa_GetErrorText(err) << std::endl;
        Pa_Terminate();
        return -1;
    }

    std::cout << "准备开始（请保持环境安静，先采集" << NOISE_SAMPLE_DURATION << "秒噪声）..." << std::endl;
    err = Pa_StartStream(stream);
    if (err != paNoError) {
        std::cerr << "启动音频流失败: " << Pa_GetErrorText(err) << std::endl;
        Pa_CloseStream(stream);
        Pa_Terminate();
        return -1;
    }

    // Pa_IsStreamActive returns 1 while running, 0 once the callback returned
    // paComplete, and a *negative* PaError on failure. Comparing against 1
    // prevents the original bug where an error code (non-zero, hence truthy)
    // kept this loop spinning forever.
    while (Pa_IsStreamActive(stream) == 1) {
        Pa_Sleep(100);
    }

    Pa_StopStream(stream);   // no-op if the stream already stopped; keeps shutdown orderly
    Pa_CloseStream(stream);
    Pa_Terminate();

    if (!audio_data.buffer.empty()) {
        save_wav("denoised_recording.wav", audio_data.buffer);
    } else {
        std::cout << "未采集到有效音频数据！" << std::endl;
    }

    return 0;
}