import pyaudio
import wave
import threading
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from collections import deque
import struct
from data_control import data_callback

class RealTimeAudioProcessor:
    """Real-time audio capture with per-chunk feature extraction and beat detection.

    Supports a blocking recording mode (record_audio) and a non-blocking,
    background-thread mode (start_recording / stop_recording).  Every captured
    chunk can be forwarded to a user-supplied callback together with a dict of
    features (amplitudes, zero-crossing rate, spectral stats, beat flag).
    """

    def __init__(self, format=pyaudio.paInt16, channels=1, rate=44100, chunk=1024):
        """Initialize the real-time audio processor.

        Parameters:
        format: PyAudio sample format (default: 16-bit signed integers).
        channels: number of input channels.
        rate: sample rate in Hz.
        chunk: frames per buffer read from the input stream.
        """
        self.format = format
        self.channels = channels
        self.rate = rate
        self.chunk = chunk

        # PyAudio handle; terminated in __del__.
        self.audio = pyaudio.PyAudio()

        # Recording state.  is_recording is a plain bool attribute and is the
        # public way to query recording status.
        self.is_recording = False
        self.stream = None
        self.frames = []
        self.filename = ""
        self.record_thread = None

        # Real-time analysis.
        self.analysis_callback = None
        # BUGFIX: _analyze_data extends this deque with individual *samples*,
        # so maxlen must be rate * 2 (samples), not rate // chunk * 2 (chunk
        # count), to actually hold about 2 seconds of waveform data.
        self.waveform_data = deque(maxlen=self.rate * 2)

        # Beat detection.
        self.beat_history = deque(maxlen=50)  # recent RMS energy values
        self.beat_threshold = 0       # dynamic threshold derived from history
        self.beat_sensitivity = 1.5   # multiplier on recent mean energy
        self.last_beat_time = 0       # timestamp of the last detected beat
        self.min_beat_interval = 0.1  # minimum spacing between beats (seconds)

        print("实时音频处理器初始化完成")

    def set_analysis_callback(self, callback):
        """Register the per-chunk analysis callback.

        Parameters:
        callback: callable(audio_data, features) invoked for every chunk.
        """
        self.analysis_callback = callback

    def record_audio(self, seconds=5, filename="recorded_audio.wav"):
        """Record audio synchronously (blocking; kept for compatibility).

        Parameters:
        seconds: recording duration in seconds.
        filename: name of the WAV file to save.

        Returns:
        str: the filename the recording was written to.
        """
        print("开始录制...")
        stream = self.audio.open(format=self.format,
                                 channels=self.channels,
                                 rate=self.rate,
                                 input=True,
                                 frames_per_buffer=self.chunk)
        frames = []
        try:
            for _ in range(int(self.rate / self.chunk * seconds)):
                data = stream.read(self.chunk)
                frames.append(data)

                # Optional real-time analysis of the chunk just read.
                if self.analysis_callback:
                    self._analyze_data(data)

            print("录制完成!")
        finally:
            # Release the stream even if a read raised mid-recording.
            stream.stop_stream()
            stream.close()

        # Persist the captured frames as a WAV file.
        with wave.open(filename, 'wb') as wf:
            wf.setnchannels(self.channels)
            wf.setsampwidth(self.audio.get_sample_size(self.format))
            wf.setframerate(self.rate)
            wf.writeframes(b''.join(frames))

        return filename

    def start_recording(self, filename="recorded_audio.wav"):
        """Start recording on a background thread (non-blocking; for GUI control).

        Parameters:
        filename: name of the WAV file to save when recording stops.

        Returns:
        bool: always True (also when a recording is already in progress).
        """
        if self.is_recording:
            print("音频已在录制中")
            return True

        self.filename = filename
        self.is_recording = True
        self.frames = []

        # Daemon thread so a forgotten recording cannot block interpreter exit.
        self.record_thread = threading.Thread(target=self._record_audio)
        self.record_thread.daemon = True
        self.record_thread.start()
        print("音频录制线程已启动")
        return True

    def _record_audio(self):
        """Background-thread recording loop; runs until is_recording is cleared."""
        print("开始录制...")
        try:
            self.stream = self.audio.open(
                format=self.format,
                channels=self.channels,
                rate=self.rate,
                input=True,
                frames_per_buffer=self.chunk
            )
            print("音频流已打开")
        except Exception as e:
            print(f"无法打开音频流: {e}")
            # BUGFIX: was "self.recording = False", which left is_recording
            # stuck at True after a failed stream open.
            self.is_recording = False
            return

        while self.is_recording:
            try:
                data = self.stream.read(self.chunk)
                self.frames.append(data)

                # Optional real-time analysis of the chunk just read.
                if self.analysis_callback:
                    self._analyze_data(data)

            except Exception as e:
                print(f"录制错误: {e}")
                break

        # Stop and close the stream.
        if self.stream:
            self.stream.stop_stream()
            self.stream.close()

        # Save whatever was captured.
        if self.frames:
            self._save_audio_file()
        print("录制完成!")

    def _analyze_data(self, data):
        """Analyze one raw chunk: extract features, detect beats, notify callback.

        Parameters:
        data: raw audio bytes for one chunk.
        """
        # Interpret the raw bytes as 16-bit samples.  Other sample formats are
        # not specifically handled and fall back to int16 as well (as before).
        audio_data = np.frombuffer(data, dtype=np.int16)

        # Basic time/frequency-domain features for this chunk.
        features = self._extract_features(audio_data)

        # Beat detection based on the RMS energy history.
        features['is_beat'] = self._detect_beat(features)

        if self.analysis_callback:
            self.analysis_callback(audio_data, features)

        # Keep samples for waveform visualization (~2 s window, see __init__).
        self.waveform_data.extend(audio_data)

    def _extract_features(self, audio_data):
        """Extract basic time- and frequency-domain features from a chunk.

        Parameters:
        audio_data: numpy array of samples (int16).

        Returns:
        dict: amplitude_max, amplitude_rms, zero_crossing_rate,
              dominant_frequency, spectral_centroid, timestamp.
        """
        # Guard against an empty chunk (avoids max()/divide-by-zero errors).
        if audio_data.size == 0:
            return {
                'amplitude_max': 0,
                'amplitude_rms': 0.0,
                'zero_crossing_rate': 0.0,
                'dominant_frequency': 0,
                'spectral_centroid': 0,
                'timestamp': time.time()
            }

        # BUGFIX: square in float64 -- squaring int16 samples keeps int16
        # dtype and overflows (32767**2 does not fit), corrupting the RMS.
        samples = audio_data.astype(np.float64)

        amplitude_max = np.max(np.abs(audio_data))
        amplitude_rms = np.sqrt(np.mean(samples ** 2))

        # Zero-crossing rate: fraction of adjacent sample pairs changing sign.
        zero_crossings = np.where(np.diff(np.sign(audio_data)))[0]
        zero_crossing_rate = len(zero_crossings) / len(audio_data)

        # Simple magnitude spectrum (positive-frequency half only).
        fft_data = np.fft.fft(samples)
        magnitude = np.abs(fft_data[:len(fft_data) // 2])
        freqs = np.fft.fftfreq(len(samples), 1 / self.rate)[:len(fft_data) // 2]

        # Strongest single frequency component.
        dominant_freq = freqs[np.argmax(magnitude)] if len(magnitude) > 0 else 0

        # Spectral centroid: magnitude-weighted mean frequency.
        spectral_centroid = np.sum(freqs * magnitude) / np.sum(magnitude) if np.sum(magnitude) > 0 else 0

        return {
            'amplitude_max': amplitude_max,
            'amplitude_rms': amplitude_rms,
            'zero_crossing_rate': zero_crossing_rate,
            'dominant_frequency': dominant_freq,
            'spectral_centroid': spectral_centroid,
            'timestamp': time.time()
        }

    def _detect_beat(self, features):
        """Detect a beat from the current chunk's RMS energy.

        A beat fires when the RMS exceeds a dynamic threshold (sensitivity
        times the mean of recent energies) and the minimum beat interval has
        elapsed since the previous beat.

        Parameters:
        features: feature dict produced by _extract_features.

        Returns:
        bool: True if a beat was detected.
        """
        current_time = features['timestamp']
        amplitude_rms = features['amplitude_rms']

        # Update the energy history.
        self.beat_history.append(amplitude_rms)

        # Dynamic threshold: multiple of the mean of the last 10 energies.
        if len(self.beat_history) > 5:
            avg_energy = np.mean(list(self.beat_history)[-10:])
            self.beat_threshold = avg_energy * self.beat_sensitivity

        # Beat condition: loud enough AND not too soon after the last beat.
        is_beat = (
            amplitude_rms > self.beat_threshold and
            (current_time - self.last_beat_time) > self.min_beat_interval
        )

        if is_beat:
            self.last_beat_time = current_time

        return is_beat

    def stop_recording(self):
        """Stop a background recording.

        Returns:
        str: the filename passed to start_recording (the file itself is
             written by the recording thread before it exits).
        """
        if not self.is_recording:
            return self.filename

        print("正在停止音频录制...")
        self.is_recording = False
        if self.record_thread and self.record_thread.is_alive():
            self.record_thread.join(timeout=2)  # wait at most 2 seconds
        print("音频录制已停止")
        return self.filename

    def _save_audio_file(self):
        """Write self.frames to self.filename as a WAV file."""
        try:
            with wave.open(self.filename, 'wb') as wf:
                wf.setnchannels(self.channels)
                wf.setsampwidth(self.audio.get_sample_size(self.format))
                wf.setframerate(self.rate)
                wf.writeframes(b''.join(self.frames))
        except Exception as e:
            print(f"保存音频文件失败: {e}")

    # NOTE: the former is_recording() method was removed -- the bool attribute
    # of the same name (set in __init__) always shadowed it on instances, so
    # the method was unreachable.  Query the attribute directly instead.

    def get_waveform_data(self):
        """Return the stored waveform samples for visualization.

        Returns:
        deque: recent samples (roughly the last 2 seconds).
        """
        return self.waveform_data

    def __del__(self):
        """Release the PyAudio handle on destruction.

        getattr guards against a partially-constructed instance (e.g. when
        pyaudio.PyAudio() itself raised inside __init__).
        """
        audio = getattr(self, "audio", None)
        if audio:
            audio.terminate()

# Real-time waveform visualizer
class RealTimeWaveformVisualizer:
    """Two-panel live matplotlib view: raw waveform plus a rolling RMS trace."""

    def __init__(self, audio_processor, max_points=1000):
        """Build the figure and line artists.

        Parameters:
        audio_processor: object exposing get_waveform_data().
        max_points: maximum number of samples drawn in the waveform panel.
        """
        self.audio_processor = audio_processor
        self.max_points = max_points

        # Interactive mode keeps the window responsive without blocking.
        plt.ion()
        self.fig, (self.ax1, self.ax2) = plt.subplots(2, 1, figsize=(10, 6))
        self.fig.suptitle('实时音频波形和特征')

        # One line artist per panel: thin blue waveform on top, red RMS below.
        self.line1, = self.ax1.plot([], [], 'b-', linewidth=0.5)
        self.line2, = self.ax2.plot([], [], 'r-', linewidth=1)

        # Rolling buffers feeding the RMS panel (last 100 points).
        self.rms_values = deque(maxlen=100)
        self.time_values = deque(maxlen=100)

        # Shared axis cosmetics for both panels.
        for axis, title, ylabel in ((self.ax1, '实时波形', '振幅'),
                                    (self.ax2, '实时RMS幅度', 'RMS幅度')):
            axis.set_title(title)
            axis.set_ylabel(ylabel)
            axis.grid(True)
        self.ax2.set_xlabel('时间 (s)')

        self.start_time = time.time()

    def update_plot(self, frame):
        """FuncAnimation callback: refresh the waveform panel.

        The RMS panel is driven externally through update_features(); here we
        only redraw the raw waveform from the processor's sample buffer.
        """
        samples = list(self.audio_processor.get_waveform_data())
        if samples:
            # Downsample when there are more samples than we want to draw.
            if len(samples) > self.max_points:
                stride = len(samples) // self.max_points
                if stride > 1:
                    samples = samples[::stride]

            self.line1.set_data(range(len(samples)), samples)
            self.ax1.relim()
            self.ax1.autoscale_view()

        return self.line1, self.line2

    def start_visualization(self):
        """Attach the animation and show the window without blocking."""
        self.ani = FuncAnimation(self.fig, self.update_plot, interval=50, blit=False)
        plt.show(block=False)

    def update_features(self, rms_value):
        """Append one RMS sample and redraw the bottom panel.

        Parameters:
        rms_value: RMS amplitude of the most recent chunk.
        """
        elapsed = time.time() - self.start_time
        self.rms_values.append(rms_value)
        self.time_values.append(elapsed)

        self.line2.set_data(list(self.time_values), list(self.rms_values))
        self.ax2.relim()
        self.ax2.autoscale_view()

# Example analysis callback
def analysis_callback(audio_data, features):
    """Example analysis callback: log features on beats and forward the data.

    Parameters:
    audio_data: raw samples for the current chunk (numpy array).
    features: feature dict produced by RealTimeAudioProcessor._analyze_data.
    """
    # Print feature details and the beat marker only when a beat is detected.
    # (The two prints were previously guarded by the same duplicated
    # condition; they are merged into a single check.)
    if features.get('is_beat', False):
        print(f"实时特征 - 最大幅度: {features['amplitude_max']:6.0f}, "
              f"RMS: {features['amplitude_rms']:6.2f}, "
              f"主频: {features['dominant_frequency']:6.0f}Hz, "
              f"过零率: {features['zero_crossing_rate']:.3f}")
        print(">>> 检测到节拍! <<<")

    # Hand the data off to the data_control module for packaging.
    data_callback(audio_data, features)

# Usage example
if __name__ == "__main__":
    # Build the processor and hook up the example analysis callback.
    processor = RealTimeAudioProcessor()
    processor.set_analysis_callback(analysis_callback)

    # Option 1: blocking, command-line-driven recording.
    print("开始录制5秒音频...")
    processor.record_audio(seconds=5, filename="realtime_recorded.wav")

    # Option 2: non-blocking recording with live analysis (GUI mode):
    #   print("开始实时录制...")
    #   processor.start_recording("realtime_recorded.wav")
    #   time.sleep(10)                      # record for about 10 seconds
    #   filename = processor.stop_recording()
    #   print(f"录制完成，保存为: {filename}")