#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
实时传感器数据生成器
每5秒使用预测模型生成一次新的传感器数据
"""

import pandas as pd
import numpy as np
import os
import sys
import time
import threading
import pickle
import warnings
import json
from datetime import datetime, timedelta
from queue import Queue
import tensorflow as tf

# 添加项目根目录到路径
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_root)

# 导入预测模块
predict_dir = os.path.join(project_root, 'predict')
sys.path.append(predict_dir)

warnings.filterwarnings('ignore')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

class RealtimeDataGenerator:
    """Real-time sensor data generator with occupancy prediction."""

    def __init__(self, output_file='C:/python/auto_room/data/realtime_sensor_data.csv', max_records=1000):
        """
        Set up generator state.

        Parameters:
        output_file: path of the CSV file to write
        max_records: maximum number of rows kept in the CSV file
        """
        self.output_file = output_file
        # The JSON snapshot file lives next to the CSV, same base name
        self.json_file = output_file.replace('.csv', '.json')
        self.max_records = max_records

        # Sensor forecasting model and its feature scalers (loaded later)
        self.model = None
        self.scalers = None

        # Worker-thread state
        self.is_running = False
        self.data_queue = Queue()

        # Sensor channel names; order matters because predictions are
        # consumed positionally downstream.
        self.sensor_columns = (
            [f'S{i}_Temp' for i in range(1, 5)]
            + [f'S{i}_Light' for i in range(1, 5)]
            + [f'S{i}_Sound' for i in range(1, 5)]
            + ['S5_CO2', 'S5_CO2_Slope', 'S6_PIR', 'S7_PIR']
        )

        # Seed history and sliding-window length used by the predictor
        self.seed_data = None
        self.window_size = 300

        # Occupancy models; models that raise are remembered so the error
        # is only reported once.
        self.prediction_models = {}
        self.failed_models = set()

        # Make sure the output directory exists before the worker starts
        os.makedirs(os.path.dirname(self.output_file), exist_ok=True)
        
    def load_model(self):
        """Load the sensor-forecasting model, its scalers, and occupancy models.

        Tries a working-directory-relative path first, then one level up.
        On any failure self.model / self.scalers stay None and the
        generator falls back to trend/random generation.
        """
        # 1. Sensor prediction model (Keras .h5) plus pickled feature scalers
        model_path = 'model/saved_models/sensor_predictor.h5'
        scalers_path = 'model/saved_models/sensor_scalers.pkl'
        
        print("[加载] 加载传感器预测模型...")
        
        # Fall back to a parent-relative path when run from a subdirectory
        if not os.path.exists(model_path):
            model_path = '../model/saved_models/sensor_predictor.h5'
            scalers_path = '../model/saved_models/sensor_scalers.pkl'
        
        try:
            # Keras sequence model consumed by predict_next_data_point
            self.model = tf.keras.models.load_model(model_path)
            print(f"[完成] 传感器预测模型加载成功")
            
            # Scalers pickled at training time; used to (de)normalize features
            with open(scalers_path, 'rb') as f:
                self.scalers = pickle.load(f)
            print(f"[完成] 缩放器加载成功")
            
        except Exception as e:
            print(f"[错误] 传感器预测模型加载失败: {e}")
            print("[运行] 使用随机数据生成模式...")
            self.model = None
            self.scalers = None
        
        # 2. Occupancy prediction models (RF / XGBoost / LSTM)
        print("\n[加载] 加载人数预测模型...")
        self.load_occupancy_models()
    
    def load_seed_data(self):
        """Load seed history: the newest `window_size` rows of recorded data.

        Tries several known CSV locations in order; on success stores a
        DateTime-sorted DataFrame in self.seed_data and returns. If no
        file is usable, falls back to synthetic seed data.
        """
        print("[数据] 加载种子数据...")
        
        # Candidate history files, checked in order
        data_paths = [
            'data/room_occupancy.csv',
            '../data/room_occupancy.csv',
            'data/feature_engineering/room_occupancy_with_features.csv'
        ]
        
        for data_path in data_paths:
            if os.path.exists(data_path):
                try:
                    df = pd.read_csv(data_path)
                    print(f"[完成] 从 {data_path} 加载了 {len(df)} 条历史记录")
                    
                    # Build a DateTime column from Date + Time when missing
                    if 'DateTime' not in df.columns:
                        df['DateTime'] = pd.to_datetime(
                            df['Date'] + ' ' + df['Time'], 
                            format='%d-%m-%Y %H:%M:%S'
                        )
                    
                    # Keep only the newest window_size rows as the seed window
                    df = df.sort_values('DateTime').tail(self.window_size).reset_index(drop=True)
                    
                    # Skip this file if any expected sensor column is absent
                    missing_cols = [col for col in self.sensor_columns if col not in df.columns]
                    if missing_cols:
                        print(f"[警告] 缺少传感器列: {missing_cols}")
                        continue
                    
                    self.seed_data = df
                    print(f"[完成] 种子数据准备完成，时间范围: {df['DateTime'].min()} 到 {df['DateTime'].max()}")
                    return
                    
                except Exception as e:
                    print(f"[错误] 读取 {data_path} 失败: {e}")
                    continue
        
        # No usable history file: fall back to synthetic seed data
        print("[警告] 无法加载历史数据，创建随机种子数据...")
        self.create_random_seed_data()
    
    def create_random_seed_data(self):
        """Create synthetic seed data when no history file is available.

        Generates `window_size` rows at 30-second intervals ending at the
        current time (window_size * 30 s of simulated history — the old
        comment's "30 minutes" was wrong for the default of 300 rows),
        each channel drawn from a plausible distribution. The result is
        stored in self.seed_data.
        """
        print("[随机] 创建随机种子数据...")
        
        # Time axis: window_size points, 30 s apart, ending now
        end_time = datetime.now()
        start_time = end_time - timedelta(seconds=30 * self.window_size)
        timestamps = [start_time + timedelta(seconds=30 * i) for i in range(self.window_size)]
        
        data = []
        for timestamp in timestamps:
            row = {
                'Date': timestamp.strftime('%d-%m-%Y'),
                'Time': timestamp.strftime('%H:%M:%S'),
                'DateTime': timestamp,
                # Temperature sensors: roughly 20-30 °C
                'S1_Temp': np.random.normal(25, 2),
                'S2_Temp': np.random.normal(24.5, 1.5),
                'S3_Temp': np.random.normal(24.8, 1.8),
                'S4_Temp': np.random.normal(25.2, 2.2),
                # Light sensors: roughly 25-200
                'S1_Light': np.random.randint(30, 200),
                'S2_Light': np.random.randint(25, 150),
                'S3_Light': np.random.randint(40, 180),
                'S4_Light': np.random.randint(35, 160),
                # Sound sensors: 0-0.6
                'S1_Sound': np.random.uniform(0, 0.5),
                'S2_Sound': np.random.uniform(0, 0.3),
                'S3_Sound': np.random.uniform(0, 0.4),
                'S4_Sound': np.random.uniform(0, 0.6),
                # CO2: roughly 350-600 ppm, plus a small slope
                'S5_CO2': np.random.normal(450, 50),
                'S5_CO2_Slope': np.random.normal(0, 1),
                # PIR motion flags: mostly idle
                'S6_PIR': np.random.choice([0, 1], p=[0.8, 0.2]),
                'S7_PIR': np.random.choice([0, 1], p=[0.85, 0.15])
            }
            data.append(row)
        
        self.seed_data = pd.DataFrame(data)
        print(f"[完成] 随机种子数据创建完成，共 {len(self.seed_data)} 条")
    
    def prepare_time_features(self, datetime_obj):
        """准备单个时间点的时间特征"""
        time_features = np.array([
            np.sin(2 * np.pi * datetime_obj.hour / 24),        # Hour_sin
            np.cos(2 * np.pi * datetime_obj.hour / 24),        # Hour_cos
            np.sin(2 * np.pi * datetime_obj.weekday() / 7),    # DayOfWeek_sin
            np.cos(2 * np.pi * datetime_obj.weekday() / 7),    # DayOfWeek_cos
            1 if datetime_obj.weekday() >= 5 else 0            # IsWeekend
        ])
        return time_features
    
    def predict_next_data_point(self, current_data):
        """Predict the next 16-channel sensor reading.

        Feeds the last `window_size` rows of *current_data* (sensor values
        plus cyclic time features, scaled with the training-time scaler)
        through the loaded Keras model. Falls back to trend-based
        generation when the model/scalers are missing or prediction raises.

        Returns a 1-D array ordered like self.sensor_columns.
        """
        try:
            if self.model is None or self.scalers is None:
                # Model not loaded: use the simple trend-based fallback
                return self.generate_trend_based_data(current_data)
            
            # Most recent window_size rows form the model's input window
            recent_data = current_data.tail(self.window_size).copy()
            
            # Raw sensor matrix: (window_size, 16)
            sensor_data = recent_data[self.sensor_columns].values.astype(np.float32)
            
            # Per-row cyclic time encodings (hour/day-of-week sin+cos, weekend flag)
            time_features = np.array([
                self.prepare_time_features(dt) for dt in recent_data['DateTime']
            ])
            
            # Sensor and time features side by side — assumed to match the
            # layout the scaler/model were trained with (TODO confirm)
            combined_features = np.hstack([sensor_data, time_features])
            
            # Normalize with the scaler fitted at training time
            scaler = self.scalers['combined_scaler']
            normalized_data = scaler.transform(combined_features)
            
            # Shape to (1, window_size, n_features) for the sequence model
            input_sequence = normalized_data.reshape(1, self.window_size, -1)
            
            # Model inference
            prediction = self.model.predict(input_sequence, verbose=0)
            
            # Invert scaling; keep only the first predicted time step
            denormalized = scaler.inverse_transform(prediction[0][:1])
            
            # Drop trailing time-feature columns, keep the 16 sensor channels
            sensor_prediction = denormalized[0, :len(self.sensor_columns)]
            
            return sensor_prediction
            
        except Exception as e:
            print(f"[警告] 预测失败，使用趋势生成: {e}")
            return self.generate_trend_based_data(current_data)
    
    def generate_trend_based_data(self, current_data):
        """基于趋势生成下一个数据点"""
        if len(current_data) < 2:
            # 如果数据不足，使用最后一条数据加随机变化
            last_row = current_data.iloc[-1] if len(current_data) > 0 else self.seed_data.iloc[-1]
            return np.array([
                last_row['S1_Temp'] + np.random.normal(0, 0.1),
                last_row['S2_Temp'] + np.random.normal(0, 0.1),
                last_row['S3_Temp'] + np.random.normal(0, 0.1),
                last_row['S4_Temp'] + np.random.normal(0, 0.1),
                last_row['S1_Light'] + np.random.normal(0, 5),
                last_row['S2_Light'] + np.random.normal(0, 5),
                last_row['S3_Light'] + np.random.normal(0, 5),
                last_row['S4_Light'] + np.random.normal(0, 5),
                last_row['S1_Sound'] + np.random.normal(0, 0.02),
                last_row['S2_Sound'] + np.random.normal(0, 0.02),
                last_row['S3_Sound'] + np.random.normal(0, 0.02),
                last_row['S4_Sound'] + np.random.normal(0, 0.02),
                last_row['S5_CO2'] + np.random.normal(0, 2),
                last_row['S5_CO2_Slope'] + np.random.normal(0, 0.1),
                np.random.choice([0, 1], p=[0.8, 0.2]),  # S6_PIR
                np.random.choice([0, 1], p=[0.85, 0.15])  # S7_PIR
            ])
        
        # 计算简单趋势
        last_row = current_data.iloc[-1]
        prev_row = current_data.iloc[-2]
        
        prediction = []
        for col in self.sensor_columns:
            if 'PIR' in col:
                # PIR传感器随机生成
                prediction.append(np.random.choice([0, 1], p=[0.8, 0.2]))
            else:
                # 其他传感器基于趋势
                trend = (last_row[col] - prev_row[col]) * 0.5  # 缓和趋势
                noise_scale = 0.1 if 'Temp' in col else (5 if 'Light' in col else 0.02)
                new_value = last_row[col] + trend + np.random.normal(0, noise_scale)
                prediction.append(new_value)
        
        return np.array(prediction)
    
    def apply_sensor_constraints(self, prediction):
        """应用传感器数据约束"""
        # 温度传感器：15-35°C
        prediction[0:4] = np.clip(prediction[0:4], 15, 35)
        
        # 光线传感器：0-1000
        prediction[4:8] = np.clip(prediction[4:8], 0, 1000)
        
        # 声音传感器：0-2
        prediction[8:12] = np.clip(prediction[8:12], 0, 2)
        
        # CO2传感器：300-1000 ppm
        prediction[12] = np.clip(prediction[12], 300, 1000)
        
        # CO2斜率：-50到50
        prediction[13] = np.clip(prediction[13], -50, 50)
        
        # PIR传感器：0或1
        prediction[14:16] = np.round(np.clip(prediction[14:16], 0, 1)).astype(int)
        
        return prediction
    
    def load_occupancy_models(self):
        """Load up to three occupancy models (RF, XGBoost, LSTM) if present.

        Models are stored in self.prediction_models under the keys 'rf',
        'xgb' and 'lstm' (plus 'lstm_scalers' for the LSTM preprocessing);
        missing files are silently skipped.
        """
        import joblib
        
        # Prefer the working-directory layout, fall back one level up
        model_dir = 'model/saved_models'
        if not os.path.exists(model_dir):
            model_dir = '../model/saved_models'
        
        try:
            # Random forest regressor
            rf_path = os.path.join(model_dir, 'random_forest_model.joblib')
            if os.path.exists(rf_path):
                self.prediction_models['rf'] = joblib.load(rf_path)
                print("  [完成] 随机森林模型加载成功")
            
            # XGBoost regressor
            xgb_path = os.path.join(model_dir, 'xgboost_model.joblib')
            if os.path.exists(xgb_path):
                self.prediction_models['xgb'] = joblib.load(xgb_path)
                print("  [完成] XGBoost模型加载成功")
            
            # LSTM: only usable when both the network and its scalers exist
            lstm_path = os.path.join(model_dir, 'lstm_model.h5')
            lstm_scalers_path = os.path.join(model_dir, 'lstm_scalers.joblib')
            if os.path.exists(lstm_path) and os.path.exists(lstm_scalers_path):
                self.prediction_models['lstm'] = tf.keras.models.load_model(lstm_path)
                self.prediction_models['lstm_scalers'] = joblib.load(lstm_scalers_path)
                print("  [完成] LSTM模型加载成功")
            
            # 'lstm_scalers' is support data, not a model — exclude it from the count
            print(f"[完成] 已加载 {len([k for k in self.prediction_models.keys() if k != 'lstm_scalers'])} 个人数预测模型")
            
        except Exception as e:
            print(f"[错误] 人数预测模型加载失败: {e}")
    
    def process_data_for_prediction(self, record):
        """Expand a single raw sensor record into the model feature frame.

        Adds time features, per-channel aggregate statistics and pairwise
        interaction terms, then drops the raw Date/Time columns. Column
        creation order is preserved deliberately — the downstream models
        consume the features positionally.
        """
        df = pd.DataFrame([record])
        
        # Derive DateTime from Date + Time when not already present
        if 'DateTime' not in df.columns and 'Date' in df.columns and 'Time' in df.columns:
            df['DateTime'] = pd.to_datetime(df['Date'] + ' ' + df['Time'], format='%d-%m-%Y %H:%M:%S')
        
        # Calendar/time-of-day features
        if 'DateTime' in df.columns:
            dt = df['DateTime'].dt
            df['Hour'] = dt.hour
            df['Minute'] = dt.minute
            df['DayOfWeek'] = dt.dayofweek
            df['IsWeekend'] = (dt.dayofweek >= 5).astype(int)
            # Coarse period of day: 0 morning, 1 afternoon, 2 evening, 3 night
            df['TimeOfDay'] = df['Hour'].apply(
                lambda h: 0 if 5 <= h < 12 else 1 if 12 <= h < 18 else 2 if 18 <= h < 22 else 3)
            df['IsWorkingHour'] = (((df['Hour'] >= 9) & (df['Hour'] < 18)) & (df['DayOfWeek'] < 5)).astype(int)
        
        # Group the raw sensor columns by channel type (must run before the
        # derived *_Mean/*_Var columns are added, or they would match too)
        def _sensor_cols(tag):
            return [c for c in df.columns if tag in c and c.startswith('S')]
        
        temp_cols = _sensor_cols('Temp')
        light_cols = _sensor_cols('Light')
        sound_cols = _sensor_cols('Sound')
        pir_cols = _sensor_cols('PIR')
        
        # Aggregate statistics per channel type
        temps, lights, sounds = df[temp_cols], df[light_cols], df[sound_cols]
        df['Temp_Mean'] = temps.mean(axis=1)
        df['Temp_Var'] = temps.var(axis=1)
        df['Temp_Max'] = temps.max(axis=1)
        df['Temp_Min'] = temps.min(axis=1)
        df['Temp_Range'] = df['Temp_Max'] - df['Temp_Min']
        
        df['Light_Mean'] = lights.mean(axis=1)
        df['Light_Var'] = lights.var(axis=1)
        df['Light_Max'] = lights.max(axis=1)
        df['Light_Min'] = lights.min(axis=1)
        
        df['Sound_Mean'] = sounds.mean(axis=1)
        df['Sound_Var'] = sounds.var(axis=1)
        df['Sound_Max'] = sounds.max(axis=1)
        
        df['PIR_Sum'] = df[pir_cols].sum(axis=1)
        df['PIR_Any'] = (df['PIR_Sum'] > 0).astype(int)
        
        # Pairwise interaction terms, in the original fixed order
        for left, right in (('Temp_Mean', 'Light_Mean'), ('Temp_Mean', 'Sound_Mean'),
                            ('Temp_Mean', 'S5_CO2'), ('Light_Mean', 'Sound_Mean'),
                            ('Light_Mean', 'S5_CO2'), ('Sound_Mean', 'S5_CO2')):
            df[f'{left}_x_{right}'] = df[left] * df[right]
        
        # Raw timestamp columns are not model inputs
        return df.drop(columns=[c for c in ('Date', 'Time', 'DateTime') if c in df.columns])
    
    def predict_occupancy(self, record):
        """Predict room occupancy with up to three models and combine them.

        Runs RF, XGBoost and LSTM on the engineered features of *record*.
        The combined count is the median when >= 2 models succeed, the
        single value when exactly one succeeds, otherwise a heuristic
        fallback. A model that raises is added to self.failed_models and
        skipped on subsequent calls.

        Returns (occupancy, list of per-model rounded predictions).
        """
        predictions = []
        
        try:
            # Engineer the model features from the raw record
            processed_data = self.process_data_for_prediction(record)
            
            # 1. Random forest
            if 'rf' in self.prediction_models and 'rf' not in self.failed_models:
                try:
                    rf_pred = self.prediction_models['rf'].predict(processed_data)[0]
                    predictions.append(round(float(rf_pred)))
                except Exception as e:
                    if 'rf' not in self.failed_models:
                        print(f"  [警告] RF预测失败（版本兼容性问题）")
                        print(f"  [信息] 将使用XGBoost和LSTM模型继续预测")
                        self.failed_models.add('rf')
            
            # 2. XGBoost
            if 'xgb' in self.prediction_models and 'xgb' not in self.failed_models:
                try:
                    xgb_pred = self.prediction_models['xgb'].predict(processed_data)[0]
                    predictions.append(round(float(xgb_pred)))
                except Exception as e:
                    if 'xgb' not in self.failed_models:
                        print(f"  [警告] XGB预测失败: {e}")
                        self.failed_models.add('xgb')
            
            # 3. LSTM (requires its own preprocessing scalers)
            if 'lstm' in self.prediction_models and 'lstm' not in self.failed_models:
                try:
                    scalers = self.prediction_models['lstm_scalers']
                    X = processed_data.values
                    
                    # Two feature groups were scaled separately at training time
                    important_indices = scalers['important_indices']
                    other_indices = scalers['other_indices']
                    sequence_length = scalers['sequence_length']
                    
                    # Zero-pad the feature matrix if the scalers reference
                    # more columns than the engineered frame provides
                    feature_count = max(max(important_indices + other_indices) + 1 if important_indices or other_indices else 0, X.shape[1])
                    if feature_count > X.shape[1]:
                        X_safe = np.zeros((X.shape[0], feature_count))
                        X_safe[:, :X.shape[1]] = X
                        X = X_safe
                    
                    # Scale each feature group with its own scaler; an empty
                    # group becomes a zero-width array so hstack still works
                    if len(important_indices) > 0:
                        X_important = scalers['important_scaler'].transform(X[:, important_indices])
                    else:
                        X_important = np.array([]).reshape(X.shape[0], 0)
                    
                    if len(other_indices) > 0:
                        X_other = scalers['other_scaler'].transform(X[:, other_indices])
                    else:
                        X_other = np.array([]).reshape(X.shape[0], 0)
                    
                    X_scaled = np.hstack([X_important, X_other]) if X_important.size > 0 and X_other.size > 0 else (X_important if X_important.size > 0 else X_other)
                    
                    # Repeat the single row sequence_length times to form the
                    # (1, sequence_length, n_features) LSTM input
                    X_sequence = np.tile(X_scaled, (1, sequence_length, 1))
                    X_sequence = X_sequence.reshape(1, sequence_length, X_scaled.shape[1])
                    
                    # Predict, then invert the target scaling
                    lstm_pred_scaled = self.prediction_models['lstm'].predict(X_sequence, verbose=0)
                    lstm_pred = scalers['y_scaler'].inverse_transform(lstm_pred_scaled)[0][0]
                    predictions.append(round(float(lstm_pred)))
                    
                except Exception as e:
                    if 'lstm' not in self.failed_models:
                        print(f"  [警告] LSTM预测失败: {e}")
                        self.failed_models.add('lstm')
            
            # Combine: median of >= 2 predictions, else the single value,
            # else fall back to the heuristic when all models failed
            if len(predictions) >= 2:
                occupancy = int(np.median(predictions))
            elif len(predictions) == 1:
                occupancy = predictions[0]
            else:
                occupancy = self.simple_heuristic_prediction(record)
                predictions = [occupancy]
            
            return occupancy, predictions
            
        except Exception as e:
            print(f"[错误] 预测人数失败: {e}")
            return 0, []
    
    def simple_heuristic_prediction(self, record):
        """Heuristic occupancy estimate used when every ML model fails.

        Thresholds on CO2 concentration (ppm) and the combined PIR motion
        flags only; returns an integer in 0..3, defaulting to 1 when the
        record is malformed. (The previous version also computed average
        temperature/light but never used them — those lookups could make
        the fallback fail on records lacking temp/light keys — and caught
        all errors with a bare ``except``.)
        """
        try:
            co2 = record['S5_CO2']
            pir_sum = record['S6_PIR'] + record['S7_PIR']
        except (KeyError, TypeError):
            return 1  # default: assume one person when data is unusable
        
        # Simple thresholds: low CO2 + no motion means empty, rising CO2
        # bands map to increasing head counts
        if co2 < 400 and pir_sum == 0:
            return 0
        if co2 < 450 and pir_sum <= 1:
            return 1
        if co2 < 550:
            return 2
        return 3
    
    def generate_next_record(self, current_data):
        """生成下一条记录（包含传感器数据和预测人数）"""
        # 获取下一个时间点
        last_datetime = current_data['DateTime'].iloc[-1]
        next_datetime = last_datetime + timedelta(seconds=30)
        
        # 预测传感器数据
        prediction = self.predict_next_data_point(current_data)
        prediction = self.apply_sensor_constraints(prediction)
        
        # 构建新记录
        new_record = {
            'Date': next_datetime.strftime('%d-%m-%Y'),
            'Time': next_datetime.strftime('%H:%M:%S'),
            'DateTime': next_datetime
        }
        
        # 添加传感器数据（保留适当的小数位数）
        for i, col in enumerate(self.sensor_columns):
            if 'PIR' in col:
                new_record[col] = int(prediction[i])
            elif 'Temp' in col:
                new_record[col] = round(float(prediction[i]), 2)
            elif 'Light' in col:
                new_record[col] = int(round(float(prediction[i])))
            else:
                new_record[col] = round(float(prediction[i]), 3)
        
        # 预测房间占用人数
        if self.prediction_models:
            occupancy, model_predictions = self.predict_occupancy(new_record)
            new_record['Room_Occupancy_Count'] = occupancy
            new_record['RF_Prediction'] = model_predictions[0] if len(model_predictions) > 0 else None
            new_record['XGB_Prediction'] = model_predictions[1] if len(model_predictions) > 1 else None
            new_record['LSTM_Prediction'] = model_predictions[2] if len(model_predictions) > 2 else None
        else:
            new_record['Room_Occupancy_Count'] = 0
        
        return new_record
    
    def save_to_csv(self, data):
        """保存数据到CSV文件"""
        try:
            # 准备保存的数据（去掉DateTime列）
            save_data = data.drop(columns=['DateTime'], errors='ignore').copy()
            
            # 控制文件大小，只保留最近的记录
            if len(save_data) > self.max_records:
                save_data = save_data.tail(self.max_records)
            
            # 原子性写入：先写入临时文件，再重命名
            temp_file = self.output_file + '.tmp'
            save_data.to_csv(temp_file, index=False)
            
            # 在Windows上，先删除目标文件（如果存在）
            if os.path.exists(self.output_file):
                os.remove(self.output_file)
            
            os.rename(temp_file, self.output_file)
            
        except Exception as e:
            print(f"[错误] 保存CSV文件失败: {e}")
    
    def save_latest_to_json(self, latest_record):
        """额外保存最新的一条记录到JSON文件（包含人数预测）"""
        if latest_record is None:
            return
            
        try:
            # 构建JSON数据结构
            json_data = {
                "timestamp": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                "data": {
                    "Date": latest_record['Date'],
                    "Time": latest_record['Time'],
                    "sensors": {
                        "temperature": {
                            "S1_Temp": float(latest_record['S1_Temp']),
                            "S2_Temp": float(latest_record['S2_Temp']),
                            "S3_Temp": float(latest_record['S3_Temp']),
                            "S4_Temp": float(latest_record['S4_Temp'])
                        },
                        "light": {
                            "S1_Light": int(latest_record['S1_Light']),
                            "S2_Light": int(latest_record['S2_Light']),
                            "S3_Light": int(latest_record['S3_Light']),
                            "S4_Light": int(latest_record['S4_Light'])
                        },
                        "sound": {
                            "S1_Sound": float(latest_record['S1_Sound']),
                            "S2_Sound": float(latest_record['S2_Sound']),
                            "S3_Sound": float(latest_record['S3_Sound']),
                            "S4_Sound": float(latest_record['S4_Sound'])
                        },
                        "environment": {
                            "S5_CO2": float(latest_record['S5_CO2']),
                            "S5_CO2_Slope": float(latest_record['S5_CO2_Slope']),
                            "S6_PIR": int(latest_record['S6_PIR']),
                            "S7_PIR": int(latest_record['S7_PIR'])
                        }
                    },
                    "occupancy": {
                        "current_count": int(latest_record.get('Room_Occupancy_Count', 0)),
                        "rf_prediction": int(latest_record['RF_Prediction']) if latest_record.get('RF_Prediction') is not None else None,
                        "xgb_prediction": int(latest_record['XGB_Prediction']) if latest_record.get('XGB_Prediction') is not None else None,
                        "lstm_prediction": int(latest_record['LSTM_Prediction']) if latest_record.get('LSTM_Prediction') is not None else None,
                        "prediction_method": "median"
                    },
                    "summary": {
                        "avg_temperature": float((latest_record['S1_Temp'] + latest_record['S2_Temp'] + 
                                                 latest_record['S3_Temp'] + latest_record['S4_Temp']) / 4),
                        "avg_light": float((latest_record['S1_Light'] + latest_record['S2_Light'] + 
                                           latest_record['S3_Light'] + latest_record['S4_Light']) / 4),
                        "avg_sound": float((latest_record['S1_Sound'] + latest_record['S2_Sound'] + 
                                           latest_record['S3_Sound'] + latest_record['S4_Sound']) / 4),
                        "motion_detected": bool(latest_record['S6_PIR'] or latest_record['S7_PIR']),
                        "co2_level": float(latest_record['S5_CO2']),
                        "co2_trend": "increasing" if latest_record['S5_CO2_Slope'] > 0.1 else 
                                    ("decreasing" if latest_record['S5_CO2_Slope'] < -0.1 else "stable"),
                        "occupancy": int(latest_record.get('Room_Occupancy_Count', 0))
                    }
                },
                "status": "active",
                "generated_at": datetime.now().isoformat()
            }
            
            # 直接覆盖写入JSON文件
            with open(self.json_file, 'w', encoding='utf-8') as f:
                json.dump(json_data, f, ensure_ascii=False, indent=2)
                
        except Exception as e:
            print(f"[错误] 保存JSON文件失败: {e}")
    
    def data_generation_loop(self):
        """Worker loop: append one generated record roughly every 5 seconds.

        Seeds the in-memory frame from self.seed_data, persists CSV and
        JSON after every record, trims the in-memory frame to about
        window_size rows, and runs until self.is_running is cleared.
        Errors are logged and the loop retries after a short sleep.
        """
        print("[运行] 启动数据生成循环...")
        
        # Work on a copy so the seed data itself is never mutated
        current_data = self.seed_data.copy()
        
        # Persist the seed window immediately so consumers see data at once
        self.save_to_csv(current_data)
        # Also publish the newest row as a JSON snapshot
        self.save_latest_to_json(current_data.iloc[-1].to_dict())
        print(f"[保存] 初始数据已保存到 {self.output_file}")
        print(f"[文件] JSON文件已创建: {self.json_file}")
        
        generation_count = 0
        
        while self.is_running:
            try:
                # Generate and append the next simulated record
                new_record = self.generate_next_record(current_data)
                
                current_data = pd.concat([current_data, pd.DataFrame([new_record])], ignore_index=True)
                
                # Bound in-memory growth; keep a little more than the window
                if len(current_data) > self.window_size + 100:
                    current_data = current_data.tail(self.window_size + 50).reset_index(drop=True)
                
                # Persist the rolling history (CSV) ...
                self.save_to_csv(current_data)
                
                # ... and the latest snapshot (JSON)
                self.save_latest_to_json(new_record)
                
                generation_count += 1
                
                # Progress log every 10 generated records
                if generation_count % 10 == 0:
                    print(f"[数据] 已生成 {generation_count} 条数据，最新时间: {new_record['DateTime']}")
                    
                    # Show a few representative sensor values and the occupancy
                    print(f"   温度: {new_record['S1_Temp']:.1f}°C, 光线: {new_record['S1_Light']}, CO2: {new_record['S5_CO2']:.0f}")
                    if 'Room_Occupancy_Count' in new_record:
                        print(f"   [人数] 预测人数: {new_record['Room_Occupancy_Count']} 人")
                
                # Pace the simulated stream at one record per 5 seconds
                time.sleep(5)
                
            except Exception as e:
                print(f"[错误] 数据生成出错: {e}")
                time.sleep(5)  # back off before retrying
    
    def start(self):
        """Load models and seed data, then launch the generation thread.

        No-op (with a warning) when already running; aborts when no seed
        data could be prepared. The worker is a daemon thread, so it will
        not block interpreter shutdown.
        """
        if self.is_running:
            print("[警告] 数据生成器已在运行中")
            return
        
        print("=" * 60)
        print("[启动] 实时传感器数据生成器")
        print("=" * 60)
        
        # Load the sensor predictor and occupancy models (best effort)
        self.load_model()
        
        # Load the seed window (history file or synthetic fallback)
        self.load_seed_data()
        
        if self.seed_data is None:
            print("[错误] 无法加载种子数据，启动失败")
            return
        
        # Flag polled by the worker loop each iteration
        self.is_running = True
        
        # Daemon thread: dies together with the main process
        self.generation_thread = threading.Thread(target=self.data_generation_loop, daemon=True)
        self.generation_thread.start()
        
        print(f"[完成] 数据生成器已启动")
        print(f"[文件] CSV输出文件: {self.output_file}")
        print(f"[文件] JSON输出文件: {self.json_file}")
        print(f"[时间] 生成间隔: 5秒")
        print(f"[数据] 最大记录数: {self.max_records}")
        print("[运行] 数据生成中... (按 Ctrl+C 停止)")
    
    def stop(self):
        """Stop the generation loop and wait for the worker thread to exit.

        No-op (with a warning) when the generator is not running; the
        thread join is bounded by a 10-second timeout.
        """
        if not self.is_running:
            print("[警告] 数据生成器未在运行")
            return
        
        print("\n[停止] 正在停止数据生成器...")
        # The worker loop checks this flag on every iteration
        self.is_running = False
        
        # The thread attribute only exists after a successful start()
        worker = getattr(self, 'generation_thread', None)
        if worker is not None:
            worker.join(timeout=10)
        
        print("[完成] 数据生成器已停止")

def main():
    """Entry point: construct the generator and run until Ctrl+C.

    The worker is a daemon thread, so the main thread only has to stay
    alive; a KeyboardInterrupt triggers a clean shutdown via stop().
    """
    generator = RealtimeDataGenerator()
    
    try:
        generator.start()
        
        # Keep the main thread alive while the daemon worker generates data
        while True:
            time.sleep(1)
            
    except KeyboardInterrupt:
        print("\n[警告] 收到停止信号...")
        generator.stop()
        print("[退出] 程序已退出")

if __name__ == "__main__":
    main()