import tkinter as tk
from tkinter import ttk
import pyaudio
import numpy as np
from PIL import Image, ImageTk, ImageGrab
import time
import threading
import os
import sys
import cv2  # For video
import requests  # For downloading model assets
import csv  # For parsing class map

# --- AI and ML Imports ---
# TensorFlow / TF-Hub are optional dependencies: when they are missing the
# app degrades to plain decibel monitoring, with AI_ENABLED gating every
# AI code path below.
try:
    import tensorflow as tf
    import tensorflow_hub as hub
    AI_ENABLED = True
except ImportError:
    print("警告: TensorFlow 未安装, AI功能将被禁用。请运行: pip install tensorflow tensorflow_hub")
    AI_ENABLED = False

# --- Configuration ---
DB_THRESHOLD = 60               # default alarm threshold shown in the entry box (dB, offset scale)
AUDIO_DEVICE_INDEX = None       # None = let PyAudio choose the default input device
SCREENSHOT_DIR = "screenshots"  # alert screenshots are written here

# --- AI model configuration ---
# YAMNet model location on TensorFlow Hub.
MODEL_URL = "https://tfhub.dev/google/yamnet/1"
# CSV that maps YAMNet output indices to human-readable class names.
CLASS_MAP_URL = "https://raw.githubusercontent.com/tensorflow/models/master/research/audioset/yamnet/yamnet_class_map.csv"

# Sounds that should raise a critical alert.  These must match YAMNet's
# English display names exactly (see the class-map CSV above).
CRITICAL_SOUNDS = [
    'Siren', 'Civil defense siren', 'Alarm clock', 'Fire alarm',
    'Smoke detector, smoke alarm', 'Glass', 'Explosion', 'Gunshot, gunfire', 'Screaming'
]

# --- Global state ---
# Shared stop flag polled by both worker threads; toggled from the GUI thread.
is_monitoring = False

class WidgetLogger:
    """File-like object that appends written text to a Tk text widget.

    Instances are installed as ``sys.stdout``/``sys.stderr`` (see
    ``redirect_logging``), so ``write`` may be called from background worker
    threads.  Tk widgets are not thread-safe, therefore the actual insert is
    marshalled onto the Tk event loop via ``widget.after`` instead of
    touching the widget directly from the calling thread.
    """

    def __init__(self, widget):
        # widget: the tk.Text (or compatible) widget that displays the log.
        self.widget = widget

    def write(self, text):
        """Append *text* to the widget and scroll to the end (thread-safe)."""
        # Schedule on the Tk main loop; direct cross-thread widget access
        # can crash or corrupt the Tcl interpreter.
        self.widget.after(0, self._append, text)

    def _append(self, text):
        # Runs on the Tk main thread only.
        self.widget.insert(tk.END, text)
        self.widget.see(tk.END)

    def flush(self):
        """No-op; required by the file-like protocol (sys.stdout compatibility)."""
        pass

class SoundMonitorApp(tk.Tk):
    """Main window of the AI sound/environment monitor.

    Three concurrent activities cooperate:
      * ``audio_monitor_loop`` (worker thread) — measures loudness in dB and,
        when TensorFlow is available, classifies 16 kHz mono audio with YAMNet;
      * ``video_loop`` (worker thread) — live webcam preview;
      * the Tk event loop (main thread) — owns the widgets; worker threads
        hand it results via ``self.after(0, ...)``.
    """

    def __init__(self):
        super().__init__()
        self.title("AI智能声音与环境监控系统")
        self.geometry("1200x700")

        # Audio/video handles; created lazily when monitoring starts.
        self.p = None               # pyaudio.PyAudio instance
        self.stream = None          # open input stream while monitoring
        self.monitor_thread = None  # audio worker thread
        self.image_label = None     # webcam preview widget
        self.log_text = None        # log Text widget
        self.cap = None             # cv2.VideoCapture while monitoring
        self.video_thread = None    # video worker thread

        # AI-related attributes (populated by the background model loader).
        self.yamnet_model = None
        self.yamnet_classes = []
        self.ai_sound_label = None

        self.create_widgets()
        self.redirect_logging()

        if AI_ENABLED:
            self.load_ai_model()

    def create_widgets(self):
        """Build the three-panel layout: controls | webcam preview | log."""
        # --- Left panel (controls) ---
        control_panel = ttk.Frame(self, width=250)
        control_panel.pack(side=tk.LEFT, fill=tk.Y, padx=10, pady=10)
        control_panel.pack_propagate(False)  # keep the fixed 250 px width

        self.db_label = ttk.Label(control_panel, text="分贝: 0 dB", font=("Helvetica", 24))
        self.db_label.pack(pady=20)

        threshold_frame = ttk.Frame(control_panel)
        threshold_frame.pack(pady=10)
        ttk.Label(threshold_frame, text="报警阈值 (dB):").pack(side=tk.LEFT, padx=5)
        self.threshold_entry = ttk.Entry(threshold_frame, width=5)
        self.threshold_entry.insert(0, str(DB_THRESHOLD))
        self.threshold_entry.pack(side=tk.LEFT)

        self.toggle_button = ttk.Button(control_panel, text="开始监控", command=self.toggle_monitoring)
        self.toggle_button.pack(pady=15)

        self.status_label = ttk.Label(control_panel, text="状态: 已停止", foreground="red", font=("Helvetica", 12))
        self.status_label.pack(pady=10)

        # --- AI analysis section (only when TensorFlow imported successfully) ---
        if AI_ENABLED:
            ai_frame = ttk.LabelFrame(control_panel, text="AI声音分析")
            ai_frame.pack(pady=20, padx=10, fill=tk.X)
            ttk.Label(ai_frame, text="识别到的声音:").pack(anchor='w', padx=5)
            self.ai_sound_label = ttk.Label(ai_frame, text="---", font=("Helvetica", 11), foreground="blue", wraplength=200)
            self.ai_sound_label.pack(pady=5, padx=5, anchor='w')

        # --- Right panel (scrolling log) ---
        log_panel = ttk.Frame(self, width=350)
        log_panel.pack(side=tk.RIGHT, fill=tk.Y, padx=(0, 10), pady=10)
        log_panel.pack_propagate(False)
        log_frame = ttk.LabelFrame(log_panel, text="实时日志")
        log_frame.pack(fill=tk.BOTH, expand=True)
        self.log_text = tk.Text(log_frame, wrap=tk.WORD)
        self.log_text.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
        log_scrollbar = ttk.Scrollbar(log_frame, orient=tk.VERTICAL, command=self.log_text.yview)
        log_scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
        self.log_text.config(yscrollcommand=log_scrollbar.set)

        # --- Center panel (webcam preview) ---
        video_panel = ttk.Frame(self)
        video_panel.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, pady=10)
        self.image_label = ttk.Label(video_panel, background='grey')
        self.image_label.pack(fill=tk.BOTH, expand=True)

    def redirect_logging(self):
        """Route stdout/stderr into the log widget so print() shows in-app."""
        logger = WidgetLogger(self.log_text)
        sys.stdout = logger
        sys.stderr = logger

    def load_ai_model(self):
        """Load the AI model on a background thread to avoid freezing the GUI."""
        threading.Thread(target=self._load_model_thread, daemon=True).start()

    def _load_model_thread(self):
        """Actual model/class-map loading logic (runs off the main thread)."""
        try:
            print("正在加载AI声音分类模型 (YAMNet)...")
            self.yamnet_model = hub.load(MODEL_URL)
            print("AI模型加载成功。")

            print("正在加载声音类别...")
            # Bounded timeout: a dead network must not hang this thread forever.
            response = requests.get(CLASS_MAP_URL, timeout=30)
            response.raise_for_status()
            csv_text = response.text
            reader = csv.reader(csv_text.splitlines())
            next(reader)  # skip the CSV header row
            # Column 2 of the class map is the human-readable display name.
            self.yamnet_classes = [row[2] for row in reader]
            print(f"成功加载 {len(self.yamnet_classes)} 个声音类别。")
        except Exception as e:
            print(f"错误: 无法加载AI模型或类别: {e}")
            print("AI功能将不可用。请检查网络连接和TensorFlow安装。")
            self.yamnet_model = None

    def _get_threshold(self):
        """Return the alarm threshold (dB) from the entry box.

        Falls back to DB_THRESHOLD when the entry holds non-numeric text, so
        a typo in the GUI cannot crash the audio loop or a label callback.
        """
        try:
            return float(self.threshold_entry.get())
        except ValueError:
            return DB_THRESHOLD

    def toggle_monitoring(self):
        """Start or stop monitoring depending on the current state."""
        global is_monitoring
        if is_monitoring:
            self.stop_monitoring()
        else:
            self.start_monitoring()

    def start_monitoring(self):
        """Flip the shared flag and launch the audio and video worker threads."""
        global is_monitoring
        print("开始监控...")
        is_monitoring = True
        self.toggle_button.config(text="停止监控")
        self.status_label.config(text="状态: 监控中", foreground="green")

        # Make sure the screenshot target directory exists before any alert fires.
        os.makedirs(SCREENSHOT_DIR, exist_ok=True)

        self.monitor_thread = threading.Thread(target=self.audio_monitor_loop, daemon=True)
        self.monitor_thread.start()

        self.video_thread = threading.Thread(target=self.video_loop, daemon=True)
        self.video_thread.start()

    def stop_monitoring(self):
        """Signal the worker threads to stop and reset the GUI to idle."""
        global is_monitoring
        print("停止监控...")
        is_monitoring = False

        # Bounded joins: both daemon threads poll is_monitoring and exit soon;
        # the timeout keeps the GUI responsive if one of them is stuck in I/O.
        if self.video_thread and self.video_thread.is_alive():
            self.video_thread.join(timeout=1)
        if self.monitor_thread and self.monitor_thread.is_alive():
            self.monitor_thread.join(timeout=1)

        self.toggle_button.config(text="开始监控")
        self.status_label.config(text="状态: 已停止", foreground="red")
        self.db_label.config(text="分贝: 0 dB", foreground="black")
        if AI_ENABLED:
            self.ai_sound_label.config(text="---")
        self.image_label.config(image='', background='grey')

    def video_loop(self):
        """Worker thread: grab webcam frames and display them until stopped."""
        global is_monitoring
        self.cap = cv2.VideoCapture(0)
        if not self.cap.isOpened():
            print("错误: 无法打开摄像头。")
            return
        print("摄像头已打开。")
        while is_monitoring:
            ret, frame = self.cap.read()
            if not ret:
                break
            # OpenCV delivers BGR; PIL expects RGB.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # NOTE(review): widget access from this worker thread is not
            # strictly thread-safe in Tk; consider marshalling via self.after.
            self.update_idletasks()
            w, h = self.image_label.winfo_width(), self.image_label.winfo_height()
            if w > 1 and h > 1:
                frame = cv2.resize(frame, (w, h))
            img = Image.fromarray(frame)
            photo = ImageTk.PhotoImage(image=img)
            self.image_label.config(image=photo)
            # Keep a reference: Tk does not hold one, and without it the
            # image would be garbage-collected and show blank.
            self.image_label.image = photo
            time.sleep(0.03)  # cap at roughly 30 fps
        if self.cap:
            self.cap.release()
        print("摄像头已关闭。")

    def audio_monitor_loop(self):
        """Worker thread: read microphone audio, compute dB, run AI analysis.

        Opens a 16 kHz mono stream (YAMNet's expected sample rate), then per
        buffer: updates the dB display, triggers alert + screenshot on loud
        sounds (rate-limited), and accumulates audio for periodic YAMNet
        classification.
        """
        global is_monitoring
        self.p = pyaudio.PyAudio()

        RATE = 16000                      # YAMNet requires 16 kHz mono input
        FRAMES_PER_BUFFER = 1024
        INFERENCE_INTERVAL_SECONDS = 1.5  # audio window per AI inference
        BUFFER_SIZE = int(RATE * INFERENCE_INTERVAL_SECONDS)

        try:
            self.stream = self.p.open(format=pyaudio.paInt16, channels=1, rate=RATE,
                                      input=True, frames_per_buffer=FRAMES_PER_BUFFER,
                                      input_device_index=AUDIO_DEVICE_INDEX)
            print("音频流已打开 (16kHz, 用于AI分析)。")

            audio_buffer = np.array([], dtype=np.float32)
            last_screenshot_time = 0
            screenshot_cooldown = 5  # minimum seconds between screenshots

            while is_monitoring:
                try:
                    data = np.frombuffer(self.stream.read(FRAMES_PER_BUFFER, exception_on_overflow=False), dtype=np.int16)
                    # Normalize int16 samples to [-1.0, 1.0).
                    float_data = data.astype(np.float32) / 32768.0

                    # --- Real-time level in (offset) dB ---
                    rms = np.sqrt(np.mean(float_data**2))
                    if rms > 1e-9:
                        # +80 shifts dBFS (negative) into an intuitive positive range.
                        db = max(0, 20 * np.log10(rms) + 80)
                    else:
                        db = 0
                    self.after(0, self.update_db_label, db)

                    # --- Loudness alert + screenshot (rate-limited) ---
                    threshold = self._get_threshold()
                    current_time = time.time()
                    if db > threshold and (current_time - last_screenshot_time) > screenshot_cooldown:
                        self.after(0, self.trigger_alert, db, threshold, "高音量")
                        self.after(0, self.take_screenshot)
                        last_screenshot_time = current_time

                    # --- AI classification, once per INFERENCE_INTERVAL_SECONDS ---
                    if AI_ENABLED and self.yamnet_model and self.yamnet_classes:
                        audio_buffer = np.concatenate([audio_buffer, float_data])

                        if len(audio_buffer) >= BUFFER_SIZE:
                            waveform = audio_buffer[:BUFFER_SIZE]
                            audio_buffer = audio_buffer[BUFFER_SIZE:]

                            # Average YAMNet's per-frame scores over the window
                            # and take the highest-scoring class.
                            scores, embeddings, spectrogram = self.yamnet_model(waveform)
                            scores = scores.numpy().mean(axis=0)
                            top_class_index = scores.argmax()
                            top_class_name = self.yamnet_classes[top_class_index]
                            top_score = scores[top_class_index]

                            self.after(0, self.update_ai_sound_label, top_class_name, top_score)

                            # Critical sound AND above threshold => named alert.
                            if top_class_name in CRITICAL_SOUNDS and db > threshold:
                                self.after(0, self.trigger_alert, db, threshold, top_class_name)
                                # Skip if a screenshot was just taken for volume.
                                if (current_time - last_screenshot_time) > screenshot_cooldown:
                                    self.after(0, self.take_screenshot)
                                    last_screenshot_time = current_time

                except IOError:
                    if is_monitoring:
                        print("音频流读取错误。")
                    break
                except Exception as e:
                    print(f"音频循环中出错: {e}")
                    break
        except Exception as e:
            print(f"无法打开音频流: {e}")
            self.after(0, self.stop_monitoring)
        finally:
            # Close the stream even when it is already stopped/inactive —
            # the previous is_active() guard leaked the stream in that case.
            if self.stream:
                if self.stream.is_active():
                    self.stream.stop_stream()
                self.stream.close()
            if self.p:
                self.p.terminate()
            print("音频流已关闭。")

    def update_db_label(self, db):
        """Refresh the dB readout; red text when above the alarm threshold."""
        self.db_label.config(text=f"分贝: {db:.2f} dB")
        threshold = self._get_threshold()
        self.db_label.config(foreground="red" if db > threshold else "black")

    def update_ai_sound_label(self, sound_name, score):
        """Show the top AI-recognized sound class and its confidence score."""
        self.ai_sound_label.config(text=f"{sound_name}\n(置信度: {score:.2f})")

    def trigger_alert(self, db, threshold, sound_name="未知"):
        """Log an alert for *sound_name* and flash it in the status label."""
        alert_text = f"紧急警报: 检测到 '{sound_name}'!"
        print(f"警报: 声音 '{sound_name}' ({db:.2f} dB) 超过阈值 ({threshold} dB)!")
        self.status_label.config(text=alert_text, foreground="red")

    def take_screenshot(self):
        """Capture the application window area and save it as a PNG file."""
        timestamp = time.strftime("%Y%m%d-%H%M%S")
        filename = os.path.join(SCREENSHOT_DIR, f"截图_{timestamp}.png")
        try:
            self.update_idletasks()
            time.sleep(0.1)  # give the window a moment to repaint
            x, y, width, height = self.winfo_rootx(), self.winfo_rooty(), self.winfo_width(), self.winfo_height()
            screenshot = ImageGrab.grab(bbox=(x, y, x + width, y + height))
            screenshot.save(filename)
            # Bug fix: the old message printed the literal "(unknown)"
            # instead of the actual saved path.
            print(f"截图已保存至 {filename}")
        except Exception as e:
            print(f"截图失败: {e}")

def on_closing(app):
    """Window-close handler: stop any active monitoring, then tear down Tk."""
    # Ask the worker threads to wind down before destroying the widgets
    # they might still be touching.
    if is_monitoring:
        app.stop_monitoring()
    # Destroy the root window, which ends mainloop().
    app.destroy()

if __name__ == "__main__":
    # Build the app, route the window-close button through on_closing so
    # worker threads are stopped before Tk is torn down, then run the loop.
    app = SoundMonitorApp()
    app.protocol("WM_DELETE_WINDOW", lambda: on_closing(app))
    app.mainloop()