import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset, DataLoader
import torch

class HARDataset(Dataset):
    """Human Activity Recognition (UCI HAR) dataset.

    Expects the standard UCI HAR directory layout:

        root_dir/activity_labels.txt
        root_dir/features.txt
        root_dir/<split>/X_<split>.txt
        root_dir/<split>/y_<split>.txt
        root_dir/<split>/subject_<split>.txt

    Args:
        root_dir (str): Root directory of the UCI HAR dataset.
        split (str): 'train' or 'test', selecting which split to load.
        transform (callable, optional): Optional transform applied to each
            sample dict returned by ``__getitem__``.
    """
    def __init__(self, root_dir, split='train', transform=None):
        self.root_dir = root_dir
        self.split = split
        self.transform = transform

        # Activity id -> activity name mapping (e.g. "1 WALKING").
        self.activity_labels = pd.read_csv(
            os.path.join(root_dir, 'activity_labels.txt'),
            sep=' ',
            header=None,
            names=['id', 'activity']
        )

        # Feature index -> feature name mapping.
        self.features = pd.read_csv(
            os.path.join(root_dir, 'features.txt'),
            sep=' ',
            header=None,
            names=['id', 'feature']
        )

        # Load X / y / subject arrays for the chosen split.
        self._load_data()

    def _load_data(self):
        """Load features, labels and subject ids into aligned numpy arrays.

        The X files are whitespace-padded with variable-width columns, so a
        plain ``sep=' '`` would silently turn every run of spaces into empty
        fields parsed as NaN columns.  A whitespace regex separator parses
        them correctly in one pass.
        """
        x_path = os.path.join(self.root_dir, self.split, f'X_{self.split}.txt')
        self.X = pd.read_csv(x_path, sep=r'\s+', header=None).to_numpy(dtype=float)

        # Activity labels (one integer per row, values 1-6 in the raw files).
        y_path = os.path.join(self.root_dir, self.split, f'y_{self.split}.txt')
        self.y = pd.read_csv(y_path, header=None).to_numpy()

        # Subject (participant) id for each row.
        subject_path = os.path.join(self.root_dir, self.split, f'subject_{self.split}.txt')
        self.subjects = pd.read_csv(subject_path, header=None).to_numpy()

        # Defensive alignment: if the three files disagree in length,
        # truncate everything to the shortest.  All three are already numpy
        # arrays here, so plain slicing is safe (the previous implementation
        # mixed DataFrames and ndarrays and crashed on `.values` after
        # truncation).
        min_len = min(len(self.X), len(self.y), len(self.subjects))
        if min_len < len(self.X):
            print(f"警告: 截断X数据从{len(self.X)}到{min_len}")
            self.X = self.X[:min_len]
        if min_len < len(self.y):
            print(f"警告: 截断y数据从{len(self.y)}到{min_len}")
            self.y = self.y[:min_len]
        if min_len < len(self.subjects):
            print(f"警告: 截断subjects数据从{len(self.subjects)}到{min_len}")
            self.subjects = self.subjects[:min_len]

        # Flatten to 1-D and shift labels from 1-6 to 0-5 for loss functions.
        self.y = self.y.ravel() - 1
        self.subjects = self.subjects.ravel()

    def __len__(self):
        """Return the number of samples in the split."""
        return len(self.X)

    def __getitem__(self, idx):
        """Return one sample as a dict with 'features', 'label', 'subject'."""
        if torch.is_tensor(idx):
            idx = idx.tolist()

        sample = {
            'features': self.X[idx],
            'label': self.y[idx],
            'subject': self.subjects[idx]
        }

        if self.transform:
            sample = self.transform(sample)

        return sample

class ToTensor:
    """Convert the fields of a sample dict to PyTorch tensors.

    'features' becomes a float32 tensor; 'label' and 'subject' become
    int64 (long) tensors, as expected by classification losses.
    """
    def __call__(self, sample):
        return {
            'features': torch.tensor(sample['features'], dtype=torch.float32),
            'label': torch.tensor(sample['label'], dtype=torch.long),
            'subject': torch.tensor(sample['subject'], dtype=torch.long),
        }

class Normalize:
    """Standardize the 'features' field of a sample.

    If ``mean`` and ``std`` are supplied (e.g. statistics precomputed over
    the training set), features are transformed as ``(x - mean) / std``.
    Otherwise each sample is z-scored by its own feature statistics, which
    is deterministic and independent of iteration order.

    Note: the previous implementation fitted a StandardScaler on the first
    sample it happened to see.  With a single observation the scaler's mean
    equals that sample and its scale is 1.0 (zero-variance fallback), so the
    first sample became all zeros and every later sample had the first one
    subtracted from it — results depended on DataLoader shuffle order.

    Args:
        mean (array-like, optional): Per-feature means.
        std (array-like, optional): Per-feature standard deviations.
    """
    def __init__(self, mean=None, std=None):
        self.mean = mean
        self.std = std

    def __call__(self, sample):
        features = np.asarray(sample['features'], dtype=float)

        if self.mean is not None and self.std is not None:
            # Use the externally supplied dataset statistics.
            features = (features - self.mean) / self.std
        else:
            # Per-sample z-score; guard against constant samples (std == 0).
            mu = features.mean()
            sigma = features.std()
            if sigma == 0:
                sigma = 1.0
            features = (features - mu) / sigma

        return {
            'features': features,
            'label': sample['label'],
            'subject': sample['subject']
        }

class _ComposeTransforms:
    """Apply a sequence of transforms to a sample in order.

    A module-level class (rather than a lambda) so the dataset — and hence
    its transform — stays picklable when the DataLoader spawns worker
    processes (num_workers > 0 on spawn platforms such as Windows/macOS
    would fail to pickle a lambda).
    """
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, sample):
        for t in self.transforms:
            sample = t(sample)
        return sample


def get_har_dataloader(root_dir, batch_size=64, split='train', shuffle=True, num_workers=4):
    """Create a DataLoader over the HAR dataset.

    Args:
        root_dir (str): Root directory of the UCI HAR dataset.
        batch_size (int): Batch size.
        split (str): 'train' or 'test', selecting which split to load.
        shuffle (bool): Whether to shuffle the data each epoch.
        num_workers (int): Number of worker processes for data loading.

    Returns:
        DataLoader: PyTorch data loader yielding dicts of tensors with
        keys 'features', 'label' and 'subject'.
    """
    # Normalize first, then convert the numpy sample to tensors.
    transform = _ComposeTransforms([Normalize(), ToTensor()])

    dataset = HARDataset(
        root_dir=root_dir,
        split=split,
        transform=transform
    )

    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers
    )