"""
基于地震波的结构损伤识别Demo
文件结构：
- main.py (本文件)
- data/
  - earthquake_data.csv (示例数据文件)
- outputs/ (程序自动创建)
  - loss_curve.png
  - confusion_matrix.png
"""

import os
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset, random_split
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns


# ==================== Configuration ====================
class Config:
    """Hyper-parameters and runtime settings for the demo.

    Attributes are read as class attributes throughout the script
    (the main block also instantiates one, which behaves identically).
    """

    # Data parameters
    data_path = "data/earthquake_data.csv"  # input CSV file path
    sample_rate = 100  # sampling rate (Hz)
    window_size = 1000  # points per segment (10 s at 100 Hz)
    step = 500  # sliding-window stride (50% overlap)

    # Model parameters
    n_floors = 3  # number of building floors (classification classes)
    dropout_rate = 0.5  # dropout probability to curb overfitting

    # Training parameters
    batch_size = 32
    learning_rate = 1e-4
    epochs = 200
    patience = 5  # early-stopping patience (epochs without val improvement)

    # Device configuration: prefer CUDA when available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# Generate a dummy dataset on first run so the demo works out of the box.
# NOTE(review): this executes at import time as a module-level side effect;
# the original comment claimed it belonged inside load_and_preprocess.
if not os.path.exists(Config.data_path):
    print("Generating dummy data...")
    # Create the parent directory of the configured path (was hard-coded "data",
    # which silently breaks if Config.data_path is changed).
    os.makedirs(os.path.dirname(Config.data_path) or ".", exist_ok=True)

    n_points = 20000  # number of synthetic time steps (was repeated as a magic number)
    time = np.arange(0, n_points / Config.sample_rate, 1 / Config.sample_rate)
    acc_x = np.random.normal(0, 0.2, n_points)  # horizontal accelerations
    acc_y = np.random.normal(0, 0.2, n_points)
    acc_z = np.random.normal(0, 0.1, n_points)  # weaker vertical component
    # Monotonically increasing damage index, clipped to [0, 1]
    damage = np.clip(np.cumsum(np.random.rand(n_points) * 0.002), 0, 1)
    floor = np.random.randint(0, Config.n_floors, n_points)

    pd.DataFrame({
        'time': time,
        'acc_x': acc_x,
        'acc_y': acc_y,
        'acc_z': acc_z,
        'damage_index': damage,
        'floor_label': floor
    }).to_csv(Config.data_path, index=False)

# ==================== Data preprocessing ====================
def load_and_preprocess(config):
    """Load the CSV dataset and turn it into windowed training tensors.

    Args:
        config: object exposing ``data_path``, ``window_size`` and ``step``.

    Returns:
        X: float32 tensor [n_segments, 3, window_size] — per-channel
           normalized tri-axial acceleration windows (channels first).
        y_index: float32 tensor [n_segments, 1] — damage index sampled at
           each window's center row.
        y_floor: long tensor [n_segments] — floor label sampled at each
           window's center row.
    """
    # Expected CSV columns: time,acc_x,acc_y,acc_z,damage_index,floor_label
    df = pd.read_csv(config.data_path)
    wave = df[['acc_x', 'acc_y', 'acc_z']].values  # tri-axial acceleration
    labels = df[['damage_index', 'floor_label']].values

    def _segment(wave_data):
        """Normalize each channel independently and slice into overlapping windows."""
        max_vals = np.max(np.abs(wave_data), axis=0)
        # Guard against an all-zero channel (the original divided by zero,
        # producing NaNs that poison training).
        max_vals = np.where(max_vals == 0, 1.0, max_vals)
        normalized = wave_data / max_vals

        segments = []
        # +1 so a final window that exactly reaches the end of the signal is
        # kept (the original range(0, len - window_size, step) dropped it).
        for i in range(0, len(normalized) - config.window_size + 1, config.step):
            seg = normalized[i:i + config.window_size, :]
            segments.append(seg.transpose(1, 0))  # -> [3, window_size]
        return np.array(segments)

    segments = _segment(wave)
    n_samples = len(segments)
    print(f"Segmented data quantity: {n_samples}")

    # Label each window with the row at its center sample.
    half = config.window_size // 2
    matched_labels = labels[half: n_samples * config.step + half: config.step]
    print(f"After matching tag sample quantity: {len(matched_labels)}")
    # Defensive: keep segments and labels aligned even if the slice came up short.
    if len(matched_labels) != n_samples:
        n_samples = min(n_samples, len(matched_labels))
        segments = segments[:n_samples]
        matched_labels = matched_labels[:n_samples]

    # Convert to tensors in the dtypes the two loss functions expect.
    X = torch.tensor(segments, dtype=torch.float32)
    y_index = torch.tensor(matched_labels[:, 0], dtype=torch.float32).view(-1, 1)
    y_floor = torch.tensor(matched_labels[:, 1], dtype=torch.long)

    return X, y_index, y_floor


# ==================== Model definition ====================
class EarthquakeDetector(nn.Module):
    """1D-CNN damage identification model with two task heads.

    A shared convolutional feature extractor feeds:
      * a regression head predicting the damage index in [0, 1];
      * a classification head predicting the damaged floor (raw logits).

    Expects input of shape [batch, 3, 1000] (tri-axial acceleration windows);
    the flattened feature size (128 * 250) is tied to that window length.
    """

    def __init__(self, config):
        super().__init__()
        # Feature extraction: two conv / pool stages.
        self.features = nn.Sequential(
            nn.Conv1d(3, 64, kernel_size=50, padding=25),  # 3 input channels
            nn.ReLU(),
            nn.MaxPool1d(2),  # 1000 -> 500
            nn.Dropout(config.dropout_rate),

            nn.Conv1d(64, 128, kernel_size=25, padding=12),
            nn.ReLU(),
            nn.MaxPool1d(2)  # 500 -> 250
        )
        # Regression branch (damage index, squashed to [0, 1] by Sigmoid).
        self.regressor = nn.Sequential(
            nn.Linear(128 * 250, 64),
            nn.ReLU(),
            nn.Linear(64, 1),
            nn.Sigmoid()
        )
        # Classification branch (damaged floor). Returns RAW LOGITS:
        # the original applied Softmax here, but nn.CrossEntropyLoss already
        # applies log-softmax internally, so feeding it probabilities yields
        # a wrong loss surface and weak gradients. torch.argmax over logits
        # gives the same predictions, so evaluation code is unaffected.
        self.classifier = nn.Sequential(
            nn.Linear(128 * 250, 64),
            nn.ReLU(),
            nn.Linear(64, config.n_floors)
        )

    def forward(self, x):
        """Return (damage_index [B, 1], floor_logits [B, n_floors])."""
        x = self.features(x)
        x = x.view(x.size(0), -1)  # flatten for the linear heads
        return self.regressor(x), self.classifier(x)


# ==================== Training and evaluation ====================
def _run_epoch(model, loader, criterion_idx, criterion_flr, device, optimizer=None):
    """Run one pass over `loader`; trains when `optimizer` is given, else evaluates.

    Returns the mean multi-task loss (0.7 * regression + 0.3 * classification).
    """
    training = optimizer is not None
    if training:
        model.train()
    else:
        model.eval()

    total_loss = 0.0
    with torch.set_grad_enabled(training):
        for batch_x, batch_idx, batch_flr in loader:
            batch_x = batch_x.to(device)
            batch_idx = batch_idx.to(device)
            batch_flr = batch_flr.to(device)

            pred_idx, pred_flr = model(batch_x)
            # Weighted multi-task loss.
            loss = (0.7 * criterion_idx(pred_idx, batch_idx)
                    + 0.3 * criterion_flr(pred_flr, batch_flr))

            if training:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            total_loss += loss.item()
    return total_loss / len(loader)


def _plot_loss_curve(train_losses, val_losses):
    """Save train/validation loss curves to outputs/loss_curve.png."""
    plt.figure(figsize=(10, 6))
    plt.plot(train_losses, label='Train Loss')
    plt.plot(val_losses, label='Validation Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training Progress')
    plt.legend()
    plt.savefig('outputs/loss_curve.png')
    plt.close()


def _plot_confusion_matrix(model, val_loader, config):
    """Save the floor-classification confusion matrix (validation set)."""
    model.eval()
    all_true, all_pred = [], []
    with torch.no_grad():
        for batch_x, _, batch_flr in val_loader:
            batch_x = batch_x.to(config.device)
            _, pred_flr = model(batch_x)
            preds = torch.argmax(pred_flr, dim=1).cpu().numpy()
            all_true.extend(batch_flr.numpy())
            all_pred.extend(preds)

    # Pass explicit labels so the matrix is always n_floors x n_floors even
    # when a class is absent from the validation split (otherwise the heatmap
    # tick labels would misalign).
    cm = confusion_matrix(all_true, all_pred, labels=list(range(config.n_floors)))
    plt.figure(figsize=(8, 6))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=[f"Floor {i + 1}" for i in range(config.n_floors)],
                yticklabels=[f"Floor {i + 1}" for i in range(config.n_floors)])
    plt.xlabel('Predicted')
    plt.ylabel('True')
    plt.title('Confusion Matrix')
    plt.savefig('outputs/confusion_matrix.png')
    plt.close()


def train_model(config):
    """Train the multi-task model with early stopping and save diagnostics.

    Side effects: writes outputs/best_model.pth, outputs/loss_curve.png and
    outputs/confusion_matrix.png.
    """
    os.makedirs("outputs", exist_ok=True)

    # --- data ---
    X, y_index, y_floor = load_and_preprocess(config)
    dataset = TensorDataset(X, y_index, y_floor)
    # 80/20 train/validation split.
    train_size = int(0.8 * len(dataset))
    val_size = len(dataset) - train_size
    train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
    train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=config.batch_size)

    # --- model / optimizer / losses ---
    model = EarthquakeDetector(config).to(config.device)
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)
    criterion_idx = nn.MSELoss()
    criterion_flr = nn.CrossEntropyLoss()

    train_losses, val_losses = [], []
    best_val_loss = float('inf')
    counter = 0  # epochs since last validation improvement

    for epoch in range(config.epochs):
        avg_train = _run_epoch(model, train_loader, criterion_idx, criterion_flr,
                               config.device, optimizer=optimizer)
        avg_val = _run_epoch(model, val_loader, criterion_idx, criterion_flr,
                             config.device, optimizer=None)
        train_losses.append(avg_train)
        val_losses.append(avg_val)

        print(f"Epoch {epoch + 1:03d}/{config.epochs} | "
              f"Train Loss: {avg_train:.4f} | Val Loss: {avg_val:.4f}")

        # Early stopping: checkpoint on improvement, stop after `patience` stalls.
        if avg_val < best_val_loss:
            best_val_loss = avg_val
            counter = 0
            torch.save(model.state_dict(), "outputs/best_model.pth")
        else:
            counter += 1
            if counter >= config.patience:
                print(f"Early stopping at epoch {epoch + 1}")
                break

    _plot_loss_curve(train_losses, val_losses)

    # Reload the best checkpoint for evaluation. map_location ensures a
    # CUDA-saved checkpoint also loads when only CPU is available.
    model.load_state_dict(torch.load("outputs/best_model.pth",
                                     map_location=config.device))
    _plot_confusion_matrix(model, val_loader, config)


# ==================== Main ====================
def main():
    """Script entry point: build the config, train, and report where outputs went."""
    config = Config()
    print(f"Using device: {config.device}")
    train_model(config)
    print("Training completed. Check outputs/ for results.")


if __name__ == "__main__":
    main()