import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import os
import warnings
from sklearn.utils.class_weight import compute_class_weight
warnings.filterwarnings('ignore')

# Seed every RNG involved so results are reproducible across runs.
def set_seed(seed=42):
    """Seed numpy and torch (CPU and all CUDA devices) for reproducibility.

    Also forces cuDNN into deterministic mode, which may slow training
    slightly but makes results repeatable.
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # seeds every GPU, not just device 0
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

set_seed()

# Pick the GPU if one is available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Load the raw sensor data and the per-subject demographics table.
print("正在加载数据...")
train_data = pd.read_csv("train.csv")
train_demographics = pd.read_csv("train_demographics.csv")

# Basic shape information as a sanity check.
print("训练数据形状:", train_data.shape)
print("人口统计数据形状:", train_demographics.shape)

# Distribution of the target variable (gesture).
print("\n目标变量（手势）分布:")
gesture_counts = train_data['gesture'].value_counts()
print(gesture_counts)

# Group rows by sequence id so each gesture sequence is processed as a unit.
sequence_groups = train_data.groupby('sequence_id')

# Unique gesture labels (in order of first appearance in the file).
unique_gestures = train_data['gesture'].unique()
print(f"\n不同手势类型数量: {len(unique_gestures)}")
print("手势类型:", unique_gestures)

# Integer-encode gesture labels.
# NOTE: LabelEncoder assigns indices in *sorted* label order, which differs
# from the appearance order of `unique_gestures`.
label_encoder = LabelEncoder()
train_data['gesture_encoded'] = label_encoder.fit_transform(train_data['gesture'])
num_classes = len(unique_gestures)

# ---- Feature selection ----
print("\n正在准备数据...")

# Per-sequence containers: (frames, summary-stats) tuples, labels and ids.
sequence_features = []
sequence_labels = []
sequence_ids = []

# IMU sensor channels (accelerometer / rotation).
imu_columns = [col for col in train_data.columns if col.startswith('acc_') or col.startswith('rot_')]
# Thermistor channels.
thm_columns = [col for col in train_data.columns if col.startswith('thm_')]
# Time-of-flight channels: only a spatial subset — the full grid is too wide.
tof_columns = [col for col in train_data.columns if col.startswith('tof_') and col.endswith(('_v0', '_v7', '_v15', '_v31', '_v63'))]

# Final feature column list used for every sequence.
feature_columns = imu_columns + thm_columns + tof_columns

print(f"使用的特征数量: {len(feature_columns)}")

# train_demographics was already loaded above; removed a redundant second
# pd.read_csv of the same file. Index by subject for O(1) lookups in the
# per-sequence loop.
train_demographics.set_index("subject", inplace=True)

# Build per-sequence inputs: raw sensor frames + demographics, plus a
# per-sequence summary-statistics vector.
for sequence_id, group in sequence_groups:
    gesture = group['gesture'].iloc[0]
    gesture_encoded = group['gesture_encoded'].iloc[0]
    subject = group['subject'].iloc[0]

    # Sensor readings for this sequence; missing values become -1 sentinels.
    frames = np.nan_to_num(group[feature_columns].to_numpy(dtype=np.float32), nan=-1)

    # Look up this subject's demographics, falling back to zeros when absent.
    if subject in train_demographics.index:
        demo = train_demographics.loc[subject].values.astype(np.float32)
    else:
        demo = np.zeros(train_demographics.shape[1], dtype=np.float32)
    # Broadcast the demographic row onto every frame of the sequence.
    frames = np.concatenate([frames, np.tile(demo, (frames.shape[0], 1))], axis=1)

    # Sequence-level summary statistics: mean/std/max/min per channel.
    summary = np.concatenate([fn(frames, axis=0) for fn in (np.mean, np.std, np.max, np.min)])

    sequence_features.append((frames, summary))
    sequence_labels.append(gesture_encoded)
    sequence_ids.append(sequence_id)

# 创建PyTorch数据集
class SensorDataset(Dataset):
    """Dataset of (padded sequence, summary stats, label) triples.

    Sequences longer than the maximum length are truncated; shorter ones are
    zero-padded at the end, so every item has a fixed time dimension.
    """

    def __init__(self, seq_features, stats_features, labels, max_len=None):
        # max_len generalizes the previously hard-coded module-level constant;
        # None keeps backward compatibility by falling back to MAX_SEQ_LENGTH
        # at access time (the constant is defined after this class).
        self.seq_features = seq_features
        self.stats_features = stats_features
        self.labels = labels
        self.max_len = max_len

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        max_len = self.max_len if self.max_len is not None else MAX_SEQ_LENGTH
        feature = self.seq_features[idx]
        if len(feature) > max_len:
            feature = feature[:max_len]
        else:
            # float32 padding avoids an implicit float64 upcast before the
            # FloatTensor conversion below.
            padding = np.zeros((max_len - len(feature), feature.shape[1]), dtype=np.float32)
            feature = np.vstack([feature, padding])
        stats = self.stats_features[idx]
        return torch.FloatTensor(feature), torch.FloatTensor(stats), torch.LongTensor([self.labels[idx]])

# ===================== Tunable hyperparameters =====================
BATCH_SIZE = 32
MAX_SEQ_LENGTH = 100  # sequences are truncated/padded to this many frames
HIDDEN_SIZE = 128  # LSTM hidden units per direction
NUM_LAYERS = 2  # stacked LSTM layers
DROPOUT = 0.5
LEARNING_RATE = 0.001
WEIGHT_DECAY = 1e-4  # L2 regularization strength for Adam
NUM_EPOCHS = 40
EARLY_STOPPING_PATIENCE = 7  # epochs without val-loss improvement before stopping
LR_SCHEDULER_FACTOR = 0.5  # LR multiplier applied on plateau
LR_SCHEDULER_PATIENCE = 2  # plateau epochs before LR is reduced
TRAIN_VAL_SPLIT = 0.2  # fraction of sequences held out for validation
# ===================================================================

# ---- Train/validation split and feature scaling ----
# FIX: the original code fitted the StandardScalers AFTER building the
# datasets and never used the scaled arrays at all; it also fitted on the
# full data (train leakage into validation). Scalers are now fitted on the
# training split only, and the scaled features actually feed the datasets.
X_seq = [x[0] for x in sequence_features]
X_stats = [x[1] for x in sequence_features]
X_seq_train, X_seq_val, X_stats_train, X_stats_val, y_train, y_val = train_test_split(
    X_seq, X_stats, sequence_labels, test_size=TRAIN_VAL_SPLIT, random_state=42, stratify=sequence_labels
)

# Fit the frame-level scaler on training frames only, then apply everywhere.
scaler = StandardScaler()
scaler.fit(np.concatenate(X_seq_train, axis=0))
X_seq_train = [scaler.transform(seq) for seq in X_seq_train]
X_seq_val = [scaler.transform(seq) for seq in X_seq_val]

# Summary statistics get their own scaler, also fitted on training data only.
stats_scaler = StandardScaler()
stats_scaler.fit(np.asarray(X_stats_train))
X_stats_train = list(stats_scaler.transform(np.asarray(X_stats_train)))
X_stats_val = list(stats_scaler.transform(np.asarray(X_stats_val)))

# Data loaders over the scaled features.
train_dataset = SensorDataset(X_seq_train, X_stats_train, y_train)
val_dataset = SensorDataset(X_seq_val, X_stats_val, y_val)
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE)

# 定义带Attention的LSTM模型
class Attention(nn.Module):
    """Attention pooling over bidirectional LSTM outputs.

    Scores every timestep with a single linear unit, normalizes the scores
    with softmax along the time axis, and returns the weighted sum of the
    timestep vectors.
    """

    def __init__(self, hidden_dim):
        super(Attention, self).__init__()
        # Input width is hidden_dim * 2 because the LSTM is bidirectional.
        self.attn = nn.Linear(hidden_dim * 2, 1)

    def forward(self, lstm_output):
        # lstm_output: (batch, seq_len, hidden*2)
        scores = self.attn(lstm_output)             # (batch, seq_len, 1)
        weights = torch.softmax(scores, dim=1)      # normalized over time
        pooled = (weights * lstm_output).sum(dim=1)  # (batch, hidden*2)
        return pooled

class LSTMClassifier(nn.Module):
    """Bidirectional LSTM with attention pooling plus summary statistics.

    The attention context over the LSTM outputs is concatenated with the
    per-sequence statistics vector before the fully connected head.
    """

    def __init__(self, input_size, stats_size, hidden_size, num_layers, num_classes, dropout=0.5):
        super(LSTMClassifier, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bidirectional = True
        # Inter-layer dropout only applies when the LSTM is stacked.
        self.lstm = nn.LSTM(
            input_size,
            hidden_size,
            num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0,
            bidirectional=self.bidirectional,
        )
        self.attention = Attention(hidden_size)
        # *2 on the hidden size accounts for the two LSTM directions.
        self.fc1 = nn.Linear(hidden_size * 2 + stats_size, 128)
        self.fc2 = nn.Linear(128, num_classes)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, stats):
        # No explicit (h0, c0): nn.LSTM defaults to zero initial states,
        # identical to passing zero tensors.
        lstm_out, _ = self.lstm(x)
        pooled = self.attention(lstm_out)
        head_in = torch.cat([pooled, stats], dim=1)
        hidden = self.dropout(torch.relu(self.fc1(head_in)))
        return self.fc2(hidden)

# ---- Model, loss and optimizer ----
# Per-frame feature width (sensor channels + demographic columns).
input_size = X_seq_train[0].shape[1]
stats_size = len(X_stats_train[0])
model = LSTMClassifier(input_size, stats_size, HIDDEN_SIZE, NUM_LAYERS, num_classes, dropout=DROPOUT)
model.to(device)

# Class weights compensate for the imbalanced gesture distribution.
class_weights = compute_class_weight('balanced', classes=np.unique(y_train), y=y_train)
class_weights = torch.tensor(class_weights, dtype=torch.float).to(device)
criterion = nn.CrossEntropyLoss(weight=class_weights)
# Adam with L2 regularization via weight_decay.
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)

# EarlyStopping和学习率调度
class EarlyStopping:
    """Stop training when validation loss stops improving.

    Saves the model's state_dict to ``path`` every time the validation loss
    reaches a new best; sets ``early_stop`` after ``patience`` consecutive
    non-improving epochs.
    """

    def __init__(self, patience=5, verbose=False, path='best_model.pth'):
        # ``path`` generalizes the previously hard-coded checkpoint filename;
        # the default preserves the original behavior.
        self.patience = patience
        self.verbose = verbose
        self.path = path
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        self.val_loss_min = np.inf

    def __call__(self, val_loss, model):
        score = -val_loss  # higher score is better
        if self.best_score is None:
            # First epoch: establish the baseline and checkpoint it.
            self.best_score = score
            self.save_checkpoint(val_loss, model)
        elif score < self.best_score:
            # No improvement (a tie counts as improvement, as before).
            self.counter += 1
            if self.verbose:
                print(f'EarlyStopping 计数: {self.counter} / {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model)
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        """Persist the current weights and record the new best loss."""
        torch.save(model.state_dict(), self.path)
        self.val_loss_min = val_loss

# LR scheduler halves the learning rate when validation loss plateaus.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=LR_SCHEDULER_FACTOR, patience=LR_SCHEDULER_PATIENCE)
early_stopping = EarlyStopping(patience=EARLY_STOPPING_PATIENCE, verbose=True)

# ---- Training loop ----
# FIX: use the NUM_EPOCHS constant instead of a duplicated literal 40.
num_epochs = NUM_EPOCHS
train_losses = []
val_losses = []
val_accuracies = []

print("\n开始训练模型...")
for epoch in range(num_epochs):
    model.train()
    train_loss = 0
    for batch_idx, (batch_features, batch_stats, batch_labels) in enumerate(train_loader):
        batch_features = batch_features.to(device)
        batch_stats = batch_stats.to(device)
        # FIX: view(-1) instead of squeeze() — squeeze() collapses a batch of
        # size 1 to a 0-d tensor, which breaks CrossEntropyLoss.
        batch_labels = batch_labels.view(-1).to(device)
        outputs = model(batch_features, batch_stats)
        loss = criterion(outputs, batch_labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
    train_loss = train_loss / len(train_loader)
    train_losses.append(train_loss)

    # ---- Validation pass ----
    model.eval()
    val_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (batch_features, batch_stats, batch_labels) in enumerate(val_loader):
            batch_features = batch_features.to(device)
            batch_stats = batch_stats.to(device)
            batch_labels = batch_labels.view(-1).to(device)
            outputs = model(batch_features, batch_stats)
            loss = criterion(outputs, batch_labels)
            val_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            total += batch_labels.size(0)
            correct += (predicted == batch_labels).sum().item()
    val_loss = val_loss / len(val_loader)
    val_losses.append(val_loss)
    val_accuracy = 100 * correct / total
    val_accuracies.append(val_accuracy)

    # Scheduler and early stopping both key off the validation loss.
    scheduler.step(val_loss)
    early_stopping(val_loss, model)
    print(f'第 {epoch+1}/{num_epochs} 轮，训练损失: {train_loss:.4f}，验证损失: {val_loss:.4f}，验证准确率: {val_accuracy:.2f}%')
    if early_stopping.early_stop:
        print("提前停止训练，已触发EarlyStopping！")
        break

# Persist the training/validation loss curves as a PNG.
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(train_losses, label='Training Loss')
ax.plot(val_losses, label='Validation Loss')
ax.set_xlabel('Epochs')
ax.set_ylabel('Loss')
ax.set_title('Training and Validation Loss')
ax.legend()
fig.savefig('training_loss_chart.png')
plt.close(fig)

# The best checkpoint was already written by EarlyStopping during training.
print("\n最佳模型已通过EarlyStopping自动保存为'best_model.pth'...")

# ---- Final evaluation on the validation set ----
# FIX: restore the best checkpoint saved by EarlyStopping before evaluating;
# otherwise we would evaluate the last-epoch weights, which may be worse.
model.load_state_dict(torch.load('best_model.pth', map_location=device))
model.eval()
all_preds = []
all_labels = []
with torch.no_grad():
    for batch_features, batch_stats, batch_labels in val_loader:
        batch_features = batch_features.to(device)
        batch_stats = batch_stats.to(device)
        # view(-1) keeps a 1-d shape even for a final batch of size 1.
        batch_labels = batch_labels.view(-1).to(device)
        outputs = model(batch_features, batch_stats)
        _, predicted = torch.max(outputs.data, 1)
        all_preds.extend(predicted.cpu().numpy())
        all_labels.extend(batch_labels.cpu().numpy())

# Classification report.
print("\n分类报告:")
# FIX: encoded labels follow LabelEncoder's *sorted* class order, so the
# display names must come from label_encoder.classes_; the old expression
# merely reproduced unique_gestures in order of first appearance, mislabeling
# every class in the report and confusion matrix.
target_names = list(label_encoder.classes_)
print(classification_report(all_labels, all_preds, target_names=target_names))

# Confusion matrix heatmap.
plt.figure(figsize=(15, 12))
cm = confusion_matrix(all_labels, all_preds)
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=target_names, yticklabels=target_names)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix')
plt.tight_layout()
plt.savefig('confusion_matrix.png')
plt.close()

print("\n训练完成！模型已保存为'best_model.pth'")
