import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import joblib
import matplotlib.pyplot as plt
import seaborn as sns
import os

class ActivityMonitor:
    """Wearable-sensor activity monitor.

    Loads device sensor data and per-user metadata, trains a random-forest
    activity classifier, and produces activity / intensity analyses.
    """

    def __init__(self, data_path=None, metadata_path=None):
        """
        Initialize the activity monitoring model.

        :param data_path: path to the wearable-device data CSV file
        :param metadata_path: path to the user metadata CSV file
        """
        # Raw sensor data / user metadata; None until loaded or generated.
        self.data = self.load_data(data_path) if data_path else None
        self.metadata = self.load_data(metadata_path) if metadata_path else None
        # Base sensor channels used as model features, and the label column.
        self.features = ['acc_x', 'acc_y', 'acc_z', 'gyro_x', 'gyro_y', 'gyro_z']
        self.label = 'activity'
        self.model = RandomForestClassifier(n_estimators=100, random_state=42)
        
    def load_data(self, path):
        """
        Load sensor data from a CSV file.

        Tries UTF-8 first, then falls back to GBK for files written by
        legacy Chinese tooling. All failures are reported and mapped to None.

        :param path: path to the CSV file (may be None/empty)
        :return: DataFrame on success, None on any failure
        """
        if not path:
            return None

        if not os.path.exists(path):
            print(f"文件不存在: {path}")
            return None

        try:
            # Common case: UTF-8 encoded file.
            return pd.read_csv(path, encoding='utf-8')
        except UnicodeDecodeError:
            # Fall back to GBK when UTF-8 decoding fails.
            try:
                # fix: removed pointless f-prefix on a literal with no placeholders
                print("UTF-8编码失败，尝试GBK编码...")
                return pd.read_csv(path, encoding='gbk')
            except Exception as e:
                print(f"GBK编码也失败: {e}")
                return None
        except FileNotFoundError:
            # Unlikely after the exists() check above, but guards against races.
            print(f"文件路径错误: {path}")
            return None
        except Exception as e:
            print(f"数据加载失败: {e}")
            return None

    def generate_sensor_data(self, n_samples=1000):
        """
        生成模拟的传感器数据用于测试
        :param n_samples: 样本数量
        :return: 包含模拟传感器数据的DataFrame
        """
        if self.metadata is None:
            print("缺少元数据，无法生成关联的传感器数据")
            # 创建随机用户ID
            user_ids = [f"P{i:03d}" for i in range(1, 11)]
            sensor_data = pd.DataFrame({
                'pid': np.random.choice(user_ids, n_samples)
            })
        else:
            # 使用实际的用户IDs
            user_ids = self.metadata['pid'].tolist()
            sensor_data = pd.DataFrame({
                'pid': np.random.choice(user_ids, n_samples)
            })
        
        # 生成模拟的加速度和陀螺仪数据
        for feature in self.features:
            sensor_data[feature] = np.random.normal(0, 1, n_samples)
            
        # 生成模拟的活动标签
        activities = ['walking', 'running', 'sitting', 'standing', 'lying']
        sensor_data[self.label] = np.random.choice(activities, n_samples)
        
        # 添加时间戳
        start_time = pd.Timestamp('2023-01-01')
        timestamps = [start_time + pd.Timedelta(seconds=i*10) for i in range(n_samples)]
        sensor_data['timestamp'] = timestamps
        
        self.data = sensor_data
        print(f"已生成{n_samples}条模拟传感器数据")
        return sensor_data
        
    def save_sensor_data(self, output_path):
        """
        保存生成的传感器数据
        """
        if self.data is not None:
            self.data.to_csv(output_path, index=False)
            print(f"传感器数据已保存至 {output_path}")
            return True
        return False

    def preprocess(self):
        """
        数据预处理：特征工程和缺失值处理
        """
        if self.data is not None:
            # 处理缺失值
            self.data = self.data.dropna(subset=self.features)
            
            # 添加时域特征
            self.data['acc_magnitude'] = self.data[['acc_x', 'acc_y', 'acc_z']].apply(
                lambda x: (x**2).sum()**0.5, axis=1)
            self.data['gyro_magnitude'] = self.data[['gyro_x', 'gyro_y', 'gyro_z']].apply(
                lambda x: (x**2).sum()**0.5, axis=1)
                
            # 如果存在用户ID和元数据，则合并用户信息
            if 'pid' in self.data.columns and self.metadata is not None:
                self.data = pd.merge(self.data, self.metadata, on='pid', how='left')
                
                # 将年龄范围转为数值特征
                if 'age' in self.data.columns:
                    self.data['age_min'] = self.data['age'].apply(lambda x: int(x.split('-')[0]) if isinstance(x, str) and '-' in x else 0)
                    self.data['age_max'] = self.data['age'].apply(lambda x: int(x.split('-')[1]) if isinstance(x, str) and '-' in x else 0)
                    self.data['age_avg'] = (self.data['age_min'] + self.data['age_max']) / 2
                
                # 性别编码
                if 'sex' in self.data.columns:
                    self.data['sex_code'] = self.data['sex'].map({'M': 0, 'F': 1})
            
            return True
        return False

    def add_time_features(self):
        """
        添加时间相关特征，如果数据中有时间戳
        """
        if self.data is not None and 'timestamp' in self.data.columns:
            # 转换时间戳为datetime对象
            if not pd.api.types.is_datetime64_any_dtype(self.data['timestamp']):
                try:
                    self.data['datetime'] = pd.to_datetime(self.data['timestamp'])
                except Exception as e:
                    print(f"时间戳转换失败: {e}")
                    return False
            else:
                self.data['datetime'] = self.data['timestamp']
                
            # 提取时间特征
            self.data['hour'] = self.data['datetime'].dt.hour
            self.data['day_of_week'] = self.data['datetime'].dt.dayofweek
            self.data['is_weekend'] = self.data['day_of_week'].apply(lambda x: 1 if x >= 5 else 0)
            return True
        return False

    def feature_importance(self):
        """
        Analyze and visualize feature importances of the fitted model.

        Saves a bar chart to 'feature_importance.png' and returns the
        importances sorted descending, or None when the model has not been
        fitted or the reconstructed feature list does not match it.
        """
        if not (hasattr(self, 'model') and hasattr(self.model, 'feature_importances_')):
            return None

        # Reconstruct the feature list used at training time from the
        # columns currently present in the data.
        feature_names = list(self.features) + ['acc_magnitude', 'gyro_magnitude']
        if 'age_avg' in self.data.columns:
            feature_names.append('age_avg')
        if 'sex_code' in self.data.columns:
            feature_names.append('sex_code')
        if 'hour' in self.data.columns:
            feature_names.extend(['hour', 'day_of_week', 'is_weekend'])

        importances = self.model.feature_importances_
        # Bail out when the reconstruction disagrees with the fitted model.
        if len(feature_names) != len(importances):
            print(f"警告: 特征数量不匹配 (特征名称: {len(feature_names)}, 特征重要性: {len(self.model.feature_importances_)})")
            return None

        order = np.argsort(importances)[::-1]

        # Bar chart, most important feature first.
        plt.figure(figsize=(10, 6))
        plt.title('特征重要性')
        plt.bar(range(len(order)), importances[order], align='center')
        plt.xticks(range(len(order)), [feature_names[i] for i in order], rotation=90)
        plt.tight_layout()
        plt.savefig('feature_importance.png')
        plt.close()

        # Ranked importances for the caller.
        return pd.DataFrame({
            'feature': [feature_names[i] for i in order],
            'importance': importances[order]
        })

    def train(self):
        """
        Train the activity classification model.

        Generates simulated data when none is loaded, runs preprocessing and
        time-feature extraction, fits the random forest on an 80/20 split,
        then reports accuracy, a classification report, the confusion matrix,
        and feature importances.

        :return: (accuracy, report) on success, (None, None) otherwise
        """
        # Fall back to simulated data when nothing has been loaded.
        if self.data is None:
            self.generate_sensor_data()

        if not self.preprocess():
            return None, None

        self.add_time_features()

        # Assemble the feature list: raw channels + magnitudes, plus any
        # demographic / time features preprocessing produced.
        train_features = list(self.features) + ['acc_magnitude', 'gyro_magnitude']
        for extra in ('age_avg', 'sex_code'):
            if extra in self.data.columns:
                train_features.append(extra)
        if 'hour' in self.data.columns:
            train_features.extend(['hour', 'day_of_week', 'is_weekend'])

        X = self.data[train_features]
        y = self.data[self.label]

        # Coerce everything to numeric and impute remaining gaps with means.
        X = X.apply(pd.to_numeric, errors='coerce')
        X = X.fillna(X.mean())

        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
        self.model.fit(X_train, y_train)

        # Hold-out evaluation.
        y_pred = self.model.predict(X_test)
        accuracy = accuracy_score(y_test, y_pred)
        report = classification_report(y_test, y_pred)

        print(f"准确率: {accuracy:.2f}")
        print(report)

        # Confusion matrix plot.
        self.plot_confusion_matrix(y_test, y_pred)

        # Feature-importance analysis.
        importance_df = self.feature_importance()
        if importance_df is not None:
            print("特征重要性:")
            print(importance_df)

        return accuracy, report

    def plot_confusion_matrix(self, y_true, y_pred):
        """
        Render the confusion matrix as a heatmap saved to 'confusion_matrix.png'.

        :param y_true: ground-truth activity labels
        :param y_pred: predicted activity labels
        """
        # Axis tick labels: the sorted set of true classes.
        labels = sorted(set(y_true))
        matrix = confusion_matrix(y_true, y_pred)
        plt.figure(figsize=(10, 8))
        sns.heatmap(matrix, annot=True, fmt='d', cmap='Blues',
                    xticklabels=labels, yticklabels=labels)
        plt.xlabel('预测活动')
        plt.ylabel('实际活动')
        plt.title('混淆矩阵')
        plt.tight_layout()
        plt.savefig('confusion_matrix.png')
        plt.close()

    def save_model(self, output_path):
        """
        Serialize the trained model to disk with joblib.

        :param output_path: destination path for the model file
        :return: True when a model existed and was saved, False otherwise
        """
        if not hasattr(self, 'model'):
            return False
        joblib.dump(self.model, output_path)
        print(f"模型已保存至 {output_path}")
        return True
        
    def load_model(self, model_path):
        """
        Load a previously trained model from disk.

        :param model_path: path to the serialized model file
        :return: True on success, False when the file does not exist
        """
        if not os.path.exists(model_path):
            print(f"模型文件不存在: {model_path}")
            return False
        self.model = joblib.load(model_path)
        print(f"模型已从 {model_path} 加载")
        return True
        
    def predict(self, new_data):
        """
        对新数据进行活动预测
        :param new_data: 包含相同特征的新数据
        :return: 预测的活动类型
        """
        if not hasattr(self, 'model'):
            print("错误: 模型尚未训练或加载")
            return None
            
        if isinstance(new_data, pd.DataFrame):
            # 确定用于预测的特征（与训练时相同）
            predict_features = self.features + ['acc_magnitude', 'gyro_magnitude']
            
            # 计算必要的特征
            if all(feature in new_data.columns for feature in ['acc_x', 'acc_y', 'acc_z']):
                new_data['acc_magnitude'] = new_data[['acc_x', 'acc_y', 'acc_z']].apply(
                    lambda x: (x**2).sum()**0.5, axis=1)
            else:
                print("警告: 缺少加速度数据，无法计算acc_magnitude")
                return None
                
            if all(feature in new_data.columns for feature in ['gyro_x', 'gyro_y', 'gyro_z']):
                new_data['gyro_magnitude'] = new_data[['gyro_x', 'gyro_y', 'gyro_z']].apply(
                    lambda x: (x**2).sum()**0.5, axis=1)
            else:
                print("警告: 缺少陀螺仪数据，无法计算gyro_magnitude")
                return None
            
            # 添加可能在训练中使用的其他特征，用默认值填充
            if 'age_avg' not in new_data.columns and 'age_avg' in self.data.columns:
                new_data['age_avg'] = self.data['age_avg'].mean()
                predict_features.append('age_avg')
                
            if 'sex_code' not in new_data.columns and 'sex_code' in self.data.columns:
                new_data['sex_code'] = 0  # 默认值
                predict_features.append('sex_code')
                
            # 添加时间相关特征
            if 'hour' in self.data.columns and 'hour' not in new_data.columns:
                new_data['hour'] = 12  # 默认中午
                predict_features.append('hour')
                
            if 'day_of_week' in self.data.columns and 'day_of_week' not in new_data.columns:
                new_data['day_of_week'] = 3  # 默认星期三
                predict_features.append('day_of_week')
                
            if 'is_weekend' in self.data.columns and 'is_weekend' not in new_data.columns:
                new_data['is_weekend'] = 0  # 默认工作日
                predict_features.append('is_weekend')
                
            # 确保所有特征都存在
            missing_features = [feature for feature in predict_features if feature not in new_data.columns]
            if missing_features:
                print(f"警告: 新数据中缺少特征: {missing_features}")
                return None
                    
            return self.model.predict(new_data[predict_features])
        else:
            print("预测数据必须是DataFrame格式")
            return None
            
    def analyze_by_demographic(self):
        """
        按人口统计学特征分析活动模式
        """
        if self.data is None:
            print("缺少数据，无法进行人口统计学分析")
            return None
            
        if 'sex' in self.data.columns and 'activity' in self.data.columns:
            # 按性别分析活动分布
            sex_activity = pd.crosstab(self.data['sex'], self.data['activity'], normalize='index')
            
            plt.figure(figsize=(12, 6))
            sex_activity.plot(kind='bar', stacked=True)
            plt.title('不同性别的活动分布')
            plt.xlabel('性别')
            plt.ylabel('比例')
            plt.tight_layout()
            plt.savefig('activity_by_sex.png')
            plt.close()
            
        if 'age_avg' in self.data.columns and 'activity' in self.data.columns:
            # 创建年龄组
            bins = [0, 20, 30, 40, 50, 60, 100]
            labels = ['<20', '20-30', '30-40', '40-50', '50-60', '>60']
            self.data['age_group'] = pd.cut(self.data['age_avg'], bins=bins, labels=labels)
            
            # 按年龄组分析活动分布
            age_activity = pd.crosstab(self.data['age_group'], self.data['activity'], normalize='index')
            
            plt.figure(figsize=(12, 6))
            age_activity.plot(kind='bar', stacked=True)
            plt.title('不同年龄组的活动分布')
            plt.xlabel('年龄组')
            plt.ylabel('比例')
            plt.tight_layout()
            plt.savefig('activity_by_age.png')
            plt.close()
            
        return True

    def activity_summary(self):
        """
        生成活动摘要报告
        """
        if self.data is None:
            print("缺少数据，无法生成摘要")
            return None
            
        summary = {}
        
        # 活动分布
        if 'activity' in self.data.columns:
            activity_counts = self.data['activity'].value_counts()
            summary['activity_distribution'] = activity_counts
            
            # 绘制活动分布饼图
            plt.figure(figsize=(10, 6))
            plt.pie(activity_counts, labels=activity_counts.index, autopct='%1.1f%%')
            plt.title('活动类型分布')
            plt.tight_layout()
            plt.savefig('activity_distribution.png')
            plt.close()
        
        # 时间分布（如果有时间数据）
        if 'hour' in self.data.columns:
            hourly_activity = pd.crosstab(self.data['hour'], self.data['activity'])
            summary['hourly_activity'] = hourly_activity
            
            # 绘制每小时活动热图
            plt.figure(figsize=(12, 8))
            sns.heatmap(hourly_activity, cmap='YlGnBu', linewidths=0.5)
            plt.title('每小时活动分布')
            plt.xlabel('活动类型')
            plt.ylabel('小时')
            plt.tight_layout()
            plt.savefig('hourly_activity.png')
            plt.close()
            
        return summary

    def analyze_volunteer_activity(self, input_path=None, output_file="result_1.xlsx"):
        """
        根据MET值分析志愿者的活动情况，统计不同强度活动的时长
        :param input_path: 输入路径，可以是文件或文件夹
        :param output_file: 输出Excel文件路径
        :return: 活动统计结果DataFrame
        """
        # 处理输入路径
        if input_path is None:
            data = self.data
        else:
            # 检查输入是文件还是文件夹
            if os.path.isdir(input_path):
                print(f"检测到输入路径 {input_path} 是文件夹，将合并处理所有数据文件...")
                # 获取文件夹中的所有CSV文件
                csv_files = [os.path.join(input_path, f) for f in os.listdir(input_path) 
                            if f.endswith('.csv') and 'Metadata' not in f]
                            
                # 从CSV文件名中提取志愿者ID
                volunteer_files = {}
                for file in csv_files:
                    file_name = os.path.basename(file)
                    if file_name.startswith('P') and len(file_name) >= 4:
                        volunteer_id = file_name.split('.')[0]
                        volunteer_files[volunteer_id] = file
                
                if not volunteer_files:
                    print("错误: 在文件夹中未找到志愿者数据文件")
                    return None
                
                print(f"找到以下志愿者数据文件: {list(volunteer_files.keys())}")
                
                # 处理元数据文件（如果存在）
                metadata_file = os.path.join(input_path, "Metadata1.csv")
                metadata = None
                if os.path.exists(metadata_file):
                    print(f"正在加载元数据文件: {metadata_file}")
                    try:
                        metadata = self.load_data(metadata_file)
                        print(f"成功加载元数据，共 {len(metadata)} 条记录")
                    except Exception as e:
                        print(f"加载元数据时出错: {e}")
                
                # 读取并处理每个志愿者的数据
                all_data = []
                for volunteer_id, file_path in volunteer_files.items():
                    print(f"处理志愿者 {volunteer_id} 的数据...")
                    try:
                        # 读取数据文件
                        df = self.load_data(file_path)
                        if df is None:
                            print(f"无法读取 {file_path}，跳过")
                            continue
                            
                        # 添加pid列（如果不存在）
                        if 'pid' not in df.columns:
                            df['pid'] = volunteer_id
                            
                        # 如果没有MET列但有加速度数据，尝试计算MET值
                        if 'MET' not in df.columns and all(col in df.columns for col in ['acc_x', 'acc_y', 'acc_z']):
                            print(f"为 {volunteer_id} 计算MET值...")
                            try:
                                # 使用加速度计算MET值的简单估算
                                magnitude = (df['acc_x']**2 + df['acc_y']**2 + df['acc_z']**2)**0.5
                                # 转换为MET值范围（估算）
                                df['MET'] = 1.0 + 7.0 * (magnitude / magnitude.max())
                            except Exception as e:
                                print(f"计算MET值时出错: {e}")
                                continue
                                
                        # 如果数据中没有时间戳，无法计算时间差
                        if 'timestamp' not in df.columns:
                            print(f"警告: {volunteer_id} 的数据中没有时间戳列，使用默认时间间隔")
                            # 假设固定采样率
                            df['time_diff'] = 1.0/60.0  # 默认1分钟间隔，转换为小时
                        
                        all_data.append(df)
                        print(f"成功处理 {volunteer_id} 的数据，共 {len(df)} 条记录")
                    except Exception as e:
                        print(f"处理 {volunteer_id} 数据时出错: {e}")
                
                if not all_data:
                    print("错误: 未能成功处理任何数据文件")
                    return None
                
                # 合并所有数据
                data = pd.concat(all_data, ignore_index=True)
                print(f"合并完成，共 {len(data)} 条记录")
            else:
                # 输入是单个文件
                data = self.load_data(input_path)
        
        if data is None or 'pid' not in data.columns:
            print("错误: 数据缺失或没有必要的列 (pid)")
            return None
            
        # 如果数据中没有MET列，无法继续分析
        if 'MET' not in data.columns:
            print("错误: 数据中缺少MET列，无法进行活动强度分析")
            return None
            
        # 确保数据有时间信息用于计算持续时间
        if 'time_diff' not in data.columns:
            if 'timestamp' in data.columns:
                # 转换时间戳为datetime对象
                try:
                    if not pd.api.types.is_datetime64_any_dtype(data['timestamp']):
                        data['datetime'] = pd.to_datetime(data['timestamp'])
                    else:
                        data['datetime'] = data['timestamp']
                        
                    # 计算时间差
                    data = data.sort_values(['pid', 'datetime'])
                    data['time_diff'] = data.groupby('pid')['datetime'].diff().dt.total_seconds() / 3600.0
                    data['time_diff'] = data['time_diff'].fillna(0)
                except Exception as e:
                    print(f"计算时间差时出错: {e}")
                    print("使用默认时间间隔...")
                    data['time_diff'] = 1.0/60.0  # 默认1分钟间隔，转换为小时
            else:
                print("警告: 数据没有时间戳，使用默认时间间隔")
                # 假设数据以固定间隔采集
                data['time_diff'] = 1.0/60.0  # 默认1分钟间隔，转换为小时
        
        # 根据MET值分类活动
        conditions = [
            (data['MET'] < 1.0),                         # 睡眠
            (data['MET'] >= 1.0) & (data['MET'] < 1.6),  # 静态活动
            (data['MET'] >= 1.6) & (data['MET'] < 3.0),  # 低强度
            (data['MET'] >= 3.0) & (data['MET'] < 6.0),  # 中等强度
            (data['MET'] >= 6.0)                         # 高强度
        ]
        
        categories = ['睡眠', '静态活动', '低等强度运动', '中等强度运动', '高等强度运动']
        data['activity_category'] = np.select(conditions, categories, default='未分类')
        
        # 按志愿者ID和活动类别统计时长
        result = data.groupby(['pid', 'activity_category'])['time_diff'].sum().unstack(fill_value=0)
        
        # 计算总时长
        total_time = data.groupby('pid')['time_diff'].sum()
        
        # 创建结果DataFrame
        final_result = pd.DataFrame()
        final_result['志愿者ID'] = total_time.index
        final_result['记录总时长（小时）'] = total_time.values.round(4)
        
        # 确保所有活动类别的列都存在
        for category, column_name in zip(categories, 
                                         ['睡眠总时长（小时）', 
                                         '静态活动总时长（小时）', 
                                         '低等强度运动总时长（小时）', 
                                         '中等强度运动总时长（小时）', 
                                         '高等强度运动总时长（小时）']):
            if category in result.columns:
                final_result[column_name] = result[category].values.round(4)
            else:
                final_result[column_name] = 0.0000
        
        # 按照表1的顺序排列列
        column_order = ['志愿者ID', '记录总时长（小时）', '睡眠总时长（小时）', 
                        '高等强度运动总时长（小时）', '中等强度运动总时长（小时）', 
                        '低等强度运动总时长（小时）', '静态活动总时长（小时）']
        final_result = final_result[column_order]
        
        # 保存到Excel文件
        try:
            final_result.to_excel(output_file, index=False, float_format='%.4f')
            print(f"结果已保存至 {output_file}")
        except Exception as e:
            print(f"保存结果时出错: {e}")
            
        return final_result

if __name__ == "__main__":
    # Example end-to-end usage of the ActivityMonitor pipeline.
    # NOTE(review): this relative path looks suspicious (".Python" rather
    # than "./Python" or "Python") — verify against the project layout.
    metadata_path = r".Python/taidi_B/file/Metadata1.csv"

    # Build a monitor with metadata only; sensor data is simulated below.
    monitor = ActivityMonitor(metadata_path=metadata_path)

    # Simulate wearable sensor readings.
    sensor_data = monitor.generate_sensor_data(n_samples=1000)

    # Attach simulated MET values for the intensity analysis.
    sensor_data['MET'] = np.random.uniform(0.5, 8.0, len(sensor_data))

    # Persist the simulated readings.
    monitor.save_sensor_data("sensor_data.csv")

    # Tally per-volunteer activity durations and export to Excel.
    result_1 = monitor.analyze_volunteer_activity(output_file=".Python/taidi_B/result_1.xlsx")
    print("志愿者活动统计完成！")

    # Fit the activity classifier on the (simulated) data.
    monitor.train()

    # Build the activity summary report.
    activity_summary = monitor.activity_summary()

    # Persist the fitted model.
    monitor.save_model("activity_model.pkl")

    # Demographic breakdowns (sex / age group).
    monitor.analyze_by_demographic()

    # Demonstrate prediction on a few rows sampled from the training data,
    # keeping only the raw sensor feature columns.
    if monitor.data is not None:
        sample_data = monitor.data.sample(5)[monitor.features]
        print("\n预测样本:")
        print(sample_data)

        predictions = monitor.predict(sample_data)
        print("\n预测结果:")
        print(predictions)