# utils/data_processor.py
import os
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split
import json
import joblib
from config import Config


class CSVDataProcessor:
    """CSV data processor — version tuned for small data volumes.

    Turns per-activity accelerometer CSV files (columns x_value, y_value,
    z_value) into fixed-length windowed sequences for model training, and
    applies the same preprocessing to single files at inference time.
    """

    def __init__(self):
        """Create a processor with project config and unfitted preprocessors."""
        # Project-wide settings (sequence length, splits, paths, mappings).
        self.config = Config()
        # Fitted in prepare_training_data (fit_transform); reused unfitted
        # elsewhere only after load_preprocessor restores a saved one.
        self.scaler = StandardScaler()
        self.label_encoder = LabelEncoder()

    def classify_files_by_name(self, data_dir):
        """根据文件名自动分类文件"""
        print("正在根据文件名分类数据文件...")

        classified_files = {activity: [] for activity in self.config.ACTIVITY_MAPPING.keys()}

        for activity in self.config.ACTIVITY_MAPPING.keys():
            activity_dir = os.path.join(data_dir, activity)
            if os.path.exists(activity_dir):
                for file in os.listdir(activity_dir):
                    if file.endswith('.csv'):
                        classified_files[activity].append(os.path.join(activity_dir, file))

        # 统计文件数量
        total_files = 0
        for activity, files in classified_files.items():
            print(f"{activity}: {len(files)} 个文件")
            total_files += len(files)

        print(f"总共找到 {total_files} 个数据文件")
        return classified_files

    def load_single_csv(self, file_path):
        """加载单个CSV文件"""
        try:
            df = pd.read_csv(file_path)
            print(f"加载文件: {os.path.basename(file_path)}, 形状: {df.shape}")
            return df
        except Exception as e:
            print(f"加载文件 {file_path} 失败: {e}")
            return None

    def extract_features_from_csv(self, df, label):
        """从单个CSV文件提取特征序列 - 优化版"""
        if df is None:
            return [], []

        # 检查必要的列
        required_columns = ['x_value', 'y_value', 'z_value']
        if not all(col in df.columns for col in required_columns):
            print(f"文件缺少必要的列，可用的列: {df.columns.tolist()}")
            return [], []

        # 如果数据量太少，使用所有数据作为一个序列
        if len(df) < self.config.SEQUENCE_LENGTH:
            # 填充数据到最小序列长度
            if len(df) >= self.config.MIN_SEQUENCE_LENGTH:
                # 重复数据直到达到序列长度
                repeated_data = []
                while len(repeated_data) < self.config.SEQUENCE_LENGTH:
                    remaining = self.config.SEQUENCE_LENGTH - len(repeated_data)
                    repeated_data.extend(df[required_columns].values[:remaining])

                sequence = np.array(repeated_data[:self.config.SEQUENCE_LENGTH])
                return [sequence], [label]
            else:
                print(f"  数据量太少 ({len(df)} 行)，无法生成序列")
                return [], []

        # 正常情况：数据量足够
        sensor_data = df[required_columns].values

        sequences = []
        labels = []

        seq_length = self.config.SEQUENCE_LENGTH
        step_size = max(1, int(seq_length * self.config.OVERLAP_RATIO))  # 确保至少为1

        for i in range(0, len(sensor_data) - seq_length + 1, step_size):
            sequence = sensor_data[i:i + seq_length]
            sequences.append(sequence)
            labels.append(label)

        return sequences, labels

    def process_all_data(self, data_dir):
        """处理所有数据 - 优化版"""
        print("开始处理所有数据文件...")

        # 分类文件
        classified_files = self.classify_files_by_name(data_dir)

        all_sequences = []
        all_labels = []

        for activity, files in classified_files.items():
            if not files:
                continue

            print(f"\n处理 {activity} 数据...")
            label = self.config.ACTIVITY_MAPPING[activity]

            activity_sequences = 0
            for file_path in files:
                df = self.load_single_csv(file_path)
                if df is not None:
                    sequences, labels = self.extract_features_from_csv(df, label)
                    all_sequences.extend(sequences)
                    all_labels.extend(labels)
                    activity_sequences += len(sequences)
                    print(f"  {os.path.basename(file_path)}: 生成 {len(sequences)} 个序列")

            print(f"  {activity} 总共生成: {activity_sequences} 个序列")

        # 转换为numpy数组
        if all_sequences:
            all_sequences = np.array(all_sequences)
            all_labels = np.array(all_labels)

            print(f"\n数据处理完成!")
            print(f"总序列数: {len(all_sequences)}")
            print(f"序列形状: {all_sequences.shape}")

            # 统计标签分布
            unique_labels, counts = np.unique(all_labels, return_counts=True)
            print("标签分布:")
            for label, count in zip(unique_labels, counts):
                activity_name = self.config.ACTIVITY_LABELS[label]
                print(f"  {activity_name}: {count} 个序列")

            return all_sequences, all_labels
        else:
            print("没有生成有效的序列数据")
            return None, None

    def prepare_training_data(self, sequences, labels):
        """Encode, scale, and split the dataset into train/val/test sets.

        Args:
            sequences: ndarray of shape (n, SEQUENCE_LENGTH, n_features).
            labels: array-like of integer activity labels aligned with
                ``sequences``.

        Returns:
            (X_train, X_val, X_test, y_train, y_val, y_test) with one-hot
            labels, or None when ``sequences`` is None/empty.

        Side effects: fits ``self.label_encoder`` and ``self.scaler``.
        """
        print("准备训练数据...")

        if sequences is None or len(sequences) == 0:
            return None

        # Map raw labels to contiguous class indices [0, NUM_CLASSES).
        encoded_labels = self.label_encoder.fit_transform(labels)

        # One-hot encode with plain NumPy — equivalent to Keras'
        # to_categorical(encoded_labels, NUM_CLASSES) (float32 output) but
        # avoids importing TensorFlow just for this.
        categorical_labels = np.eye(self.config.NUM_CLASSES, dtype=np.float32)[
            np.asarray(encoded_labels, dtype=int)
        ]

        # Standardize feature-wise: flatten time steps, fit, restore shape.
        original_shape = sequences.shape
        sequences_flat = sequences.reshape(-1, sequences.shape[-1])
        sequences_scaled = self.scaler.fit_transform(sequences_flat)
        sequences_scaled = sequences_scaled.reshape(original_shape)

        if len(sequences) < 20:
            # Too little data to stratify reliably: plain 80/20 holdout and
            # reuse the test set as the validation set.
            # NOTE(review): the message below says leave-one-out CV but the
            # code performs a simple holdout split — confirm intent.
            print("数据量较少，使用留一法交叉验证")
            X_train, X_test, y_train, y_test = train_test_split(
                sequences_scaled, categorical_labels,
                test_size=0.2,
                random_state=42
            )
            X_val = X_test  # validation set aliases the test set
            y_val = y_test
        else:
            # Split the integer labels alongside the data so the second
            # (train/val) split can stratify on the labels that actually
            # belong to X_train. BUG FIX: the previous code stratified on
            # encoded_labels[:len(X_train)], i.e. the labels of the
            # *unshuffled* first rows — misaligned after shuffling, which
            # skews (and can even break) the stratification.
            X_train, X_test, y_train, y_test, labels_train, _ = train_test_split(
                sequences_scaled, categorical_labels, encoded_labels,
                test_size=self.config.TEST_SPLIT,
                random_state=42,
                stratify=encoded_labels
            )

            X_train, X_val, y_train, y_val = train_test_split(
                X_train, y_train,
                test_size=self.config.VALIDATION_SPLIT,
                random_state=42,
                stratify=labels_train
            )

        print(f"训练集: {X_train.shape}")
        print(f"验证集: {X_val.shape}")
        print(f"测试集: {X_test.shape}")

        return X_train, X_val, X_test, y_train, y_val, y_test

    def save_preprocessor(self):
        """Persist the fitted scaler/encoder plus key config values.

        Written with joblib to ``Config.get_preprocessor_path()`` so that
        inference code (via ``load_preprocessor``) reproduces the exact
        training-time preprocessing.
        """
        preprocessor_data = {
            'scaler': self.scaler,
            'label_encoder': self.label_encoder,
            'config': {
                'sequence_length': self.config.SEQUENCE_LENGTH,
                'num_features': self.config.NUM_FEATURES
            }
        }

        # Resolve the target path once instead of calling the getter twice.
        path = self.config.get_preprocessor_path()
        joblib.dump(preprocessor_data, path)
        print(f"预处理器已保存到: {path}")

    def load_preprocessor(self):
        """Restore the scaler and label encoder saved by ``save_preprocessor``."""
        saved = joblib.load(self.config.get_preprocessor_path())
        # Replace the unfitted instances from __init__ with the fitted ones.
        self.scaler = saved['scaler']
        self.label_encoder = saved['label_encoder']
        print("预处理器加载成功")

    def process_single_test_file(self, file_path):
        """处理单个测试文件"""
        print(f"处理测试文件: {os.path.basename(file_path)}")

        df = self.load_single_csv(file_path)
        if df is None:
            return None

        # 检查必要的列
        required_columns = ['x_value', 'y_value', 'z_value']
        if not all(col in df.columns for col in required_columns):
            print(f"文件缺少必要的列，可用的列: {df.columns.tolist()}")
            return None

        # 提取传感器数据
        sensor_data = df[required_columns].values

        # 创建序列
        sequences = []
        seq_length = self.config.SEQUENCE_LENGTH

        # 如果数据量太少，使用所有数据作为一个序列
        if len(sensor_data) < seq_length:
            if len(sensor_data) >= self.config.MIN_SEQUENCE_LENGTH:
                # 重复数据直到达到序列长度
                repeated_data = []
                while len(repeated_data) < seq_length:
                    remaining = seq_length - len(repeated_data)
                    repeated_data.extend(sensor_data[:remaining])

                sequence = np.array(repeated_data[:seq_length])
                sequences.append(sequence)
            else:
                print(f"  数据量太少 ({len(sensor_data)} 行)，无法生成序列")
                return None
        else:
            # 正常情况：数据量足够
            step_size = max(1, int(seq_length * self.config.OVERLAP_RATIO))

            for i in range(0, len(sensor_data) - seq_length + 1, step_size):
                sequence = sensor_data[i:i + seq_length]
                sequences.append(sequence)

        if len(sequences) == 0:
            print("无法从该文件生成序列")
            return None

        # 标准化
        sequences = np.array(sequences)
        original_shape = sequences.shape
        sequences_flat = sequences.reshape(-1, sequences.shape[-1])
        sequences_scaled = self.scaler.transform(sequences_flat)
        sequences_scaled = sequences_scaled.reshape(original_shape)

        print(f"生成 {len(sequences)} 个测试序列")
        return sequences_scaled