import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader, random_split
import scipy.io as sio
import numpy as np
from model import EMGCNN
import matplotlib.pyplot as plt
import itertools
from torch.utils.tensorboard import SummaryWriter
import os
from datetime import datetime
import random

def preprocess_and_cache(mat_files, cache_file="ninapro_cache.pt", window_size=200, step_size=5):
    """Load windowed EMG data, caching the preprocessed tensors on disk.

    The cache records the windowing parameters it was built with; a cached
    file created with different ``window_size``/``step_size`` is rebuilt
    instead of being silently returned (the old behavior keyed the cache on
    filename only, so parameter changes returned stale data). Legacy caches
    saved as a bare ``(X, y)`` tuple are still accepted as-is.

    Args:
        mat_files: paths forwarded to load_all_data on a cache miss.
        cache_file: path of the torch-serialized cache.
        window_size: samples per window (forwarded to load_all_data).
        step_size: stride between window starts (forwarded to load_all_data).

    Returns:
        (X, y) tensors as produced by load_all_data.
    """
    if os.path.exists(cache_file):
        cached = torch.load(cache_file)
        if isinstance(cached, dict):
            if (cached.get("window_size") == window_size
                    and cached.get("step_size") == step_size):
                print(f"Loading preprocessed data from {cache_file}")
                return cached["X"], cached["y"]
            # Parameter mismatch: fall through and rebuild the cache.
            print(f"Cache {cache_file} was built with different windowing "
                  f"parameters; rebuilding.")
        else:
            # Legacy format: a bare (X, y) tuple with no recorded parameters.
            print(f"Loading preprocessed data from {cache_file}")
            return cached

    X, y = load_all_data(mat_files, window_size, step_size)
    torch.save({"X": X, "y": y,
                "window_size": window_size, "step_size": step_size},
               cache_file)
    print(f"Saved preprocessed data to {cache_file}")
    return X, y


def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix'):
    """Render a confusion matrix as a matplotlib figure.

    Args:
        cm: square 2-D array of counts (rows = true, cols = predicted).
        classes: tick labels, one per class.
        normalize: if True, convert each row to fractions of its row sum.
        title: axes title.

    Returns:
        The matplotlib Figure containing the annotated heatmap.
    """
    if normalize:
        row_totals = cm.sum(axis=1)[:, np.newaxis]
        with np.errstate(all='ignore'):  # silence divide-by-zero warnings
            cm = cm.astype("float") / row_totals
        cm = np.nan_to_num(cm)  # empty rows produce NaN/inf -> map to 0

    fig, ax = plt.subplots(figsize=(8, 8))
    heatmap = ax.imshow(cm, interpolation="nearest", cmap=plt.cm.Blues)
    ax.figure.colorbar(heatmap, ax=ax)

    n_rows, n_cols = cm.shape
    ax.set(
        xticks=np.arange(n_cols),
        yticks=np.arange(n_rows),
        xticklabels=classes,
        yticklabels=classes,
        ylabel="True label",
        xlabel="Predicted label",
        title=title,
    )
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")

    cell_fmt = ".2f" if normalize else "d"
    # Annotate every cell; switch text color at half of the max for contrast.
    cutoff = cm.max() / 2.
    for row in range(n_rows):
        for col in range(n_cols):
            ax.text(col, row, format(cm[row, col], cell_fmt),
                    ha="center", va="center",
                    color="white" if cm[row, col] > cutoff else "black")

    fig.tight_layout()
    return fig


# --------------------------
# Step 1: Preload all data into memory
# --------------------------
def load_all_data(mat_files, window_size=200, step_size=20, ignore_rest=True):
    """Slide a window over each recording and collect (channels, window) samples.

    Args:
        mat_files: paths to .mat files containing an 'emg' array of shape
            (N, channels) and a 'stimulus' label vector of length N.
        window_size: samples per window.
        step_size: stride between consecutive window starts.
        ignore_rest: if True, drop windows whose majority label is 0.

    Returns:
        (X, y): float32 tensor of shape (num_samples, channels, window_size)
        and long tensor of shape (num_samples,). Both are empty (length 0)
        when no usable window exists.
    """
    X_all, y_all = [], []

    for file in mat_files:
        data = sio.loadmat(file)
        emg = data['emg']                              # (N, channels)
        stim = data['stimulus'].squeeze().astype(int)  # (N,) int labels for bincount

        N = len(stim)
        # "+ 1" so the final fully-covered window [N - window_size, N) is
        # included; the previous upper bound of N - window_size dropped it.
        for start in range(0, N - window_size + 1, step_size):
            end = start + window_size
            # Majority label within the window.
            label = np.bincount(stim[start:end]).argmax()
            if ignore_rest and label == 0:
                continue

            X_all.append(emg[start:end, :].T)  # transpose to (channels, window_size)
            y_all.append(label)

    if not X_all:
        # np.stack raises on an empty list; return empty tensors instead.
        return (torch.empty(0, dtype=torch.float32),
                torch.empty(0, dtype=torch.long))

    X_all = np.stack(X_all)                    # (num_samples, channels, window_size)
    y_all = np.array(y_all)

    print(f"Preloaded dataset: {X_all.shape}, labels: {y_all.shape}")
    return torch.tensor(X_all, dtype=torch.float32), torch.tensor(y_all, dtype=torch.long)

# --------------------------
# Step 2: Build the DataLoaders
# --------------------------

def build_dataloader_from_tensors(X, y, batch_size=256, num_workers=4):
    """Wrap preloaded tensors in an 80/20 train/validation DataLoader pair.

    Args:
        X: sample tensor, first dimension indexes samples.
        y: label tensor aligned with X.
        batch_size: batch size for both loaders.
        num_workers: worker processes for both loaders.

    Returns:
        (train_loader, val_loader); the training loader shuffles, the
        validation loader does not.
    """
    full_set = TensorDataset(X, y)
    n_train = int(0.8 * len(full_set))
    train_set, val_set = random_split(full_set, [n_train, len(full_set) - n_train])

    shared = dict(batch_size=batch_size, num_workers=num_workers, pin_memory=True)
    return (DataLoader(train_set, shuffle=True, **shared),
            DataLoader(val_set, shuffle=False, **shared))


def build_dataloader(mat_files, batch_size=256, num_workers=4, device="cuda"):
    """Build 80/20 train/validation DataLoaders from (cached) .mat recordings.

    Args:
        mat_files: paths forwarded to preprocess_and_cache.
        batch_size: batch size for both loaders.
        num_workers: worker processes for both loaders.
        device: accepted for API compatibility; not used here — batches are
            moved to the device by the training loop.

    Returns:
        (train_loader, val_loader).
    """
    X, y = preprocess_and_cache(mat_files)
    full_set = TensorDataset(X, y)

    # 80/20 train/validation split.
    n_train = int(0.8 * len(full_set))
    n_val = len(full_set) - n_train
    train_set, val_set = random_split(full_set, [n_train, n_val])

    shared = dict(batch_size=batch_size, num_workers=num_workers, pin_memory=True)
    train_loader = DataLoader(train_set, shuffle=True, **shared)
    val_loader = DataLoader(val_set, shuffle=False, **shared)
    return train_loader, val_loader


# --------------------------
# Step 4: Training loop + TensorBoard
# --------------------------
def train_model(mat_files, epochs=50, device="cuda"):
    """Train EMGCNN on windowed EMG data with TensorBoard logging.

    Logs per-epoch train/val loss and accuracy plus a normalized confusion
    matrix figure to a timestamped run directory under ``runs/``.

    Args:
        mat_files: .mat recordings forwarded to preprocess_and_cache.
        epochs: number of training epochs.
        device: torch device string for the model and batches.

    Returns:
        The trained model (still on ``device``).
    """
    # Imported once here instead of inside the epoch loop (as before);
    # function-scoped so sklearn stays optional for callers that never train.
    from sklearn.metrics import confusion_matrix

    train_loader, val_loader = build_dataloader_from_tensors(
        *preprocess_and_cache(mat_files), batch_size=1024, num_workers=4)

    # Single source of truth for the class count (model head + confusion matrix).
    num_classes = 51
    model = EMGCNN(num_classes=num_classes).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    # Timestamped TensorBoard log directory.
    logdir = os.path.join("runs", datetime.now().strftime("%Y%m%d-%H%M%S"))
    writer = SummaryWriter(log_dir=logdir)

    # Log the model graph once, using a real batch as the example input.
    example_X, _ = next(iter(train_loader))
    writer.add_graph(model, example_X.to(device))

    for epoch in range(epochs):
        # --- Training ---
        model.train()
        running_loss, correct, total = 0.0, 0, 0

        for X, y in train_loader:
            X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)

            optimizer.zero_grad()
            outputs = model(X)
            loss = criterion(outputs, y)
            loss.backward()
            optimizer.step()

            # Sample-weighted loss sum so the epoch mean is per-sample.
            running_loss += loss.item() * X.size(0)
            _, predicted = outputs.max(1)
            total += y.size(0)
            correct += predicted.eq(y).sum().item()

        train_loss = running_loss / total
        train_acc = correct / total * 100

        # --- Validation ---
        model.eval()
        val_loss, val_correct, val_total = 0.0, 0, 0
        all_preds, all_labels = [], []

        with torch.no_grad():
            for X, y in val_loader:
                X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
                outputs = model(X)
                loss = criterion(outputs, y)

                val_loss += loss.item() * X.size(0)
                _, predicted = outputs.max(1)
                val_total += y.size(0)
                val_correct += predicted.eq(y).sum().item()

                all_preds.extend(predicted.cpu().numpy())
                all_labels.extend(y.cpu().numpy())

        val_loss /= val_total
        val_acc = val_correct / val_total * 100

        # --- Logging ---
        writer.add_scalar("Loss/train", train_loss, epoch)
        writer.add_scalar("Loss/val", val_loss, epoch)
        writer.add_scalar("Accuracy/train", train_acc, epoch)
        writer.add_scalar("Accuracy/val", val_acc, epoch)

        # --- Confusion matrix (normalized) on the validation predictions ---
        cm = confusion_matrix(all_labels, all_preds, labels=list(range(num_classes)))
        fig = plot_confusion_matrix(cm, classes=[str(i) for i in range(num_classes)], normalize=True)
        writer.add_figure("ConfusionMatrix", fig, epoch)
        plt.close(fig)  # free the figure; the writer already captured it

        print(f"Epoch [{epoch+1}/{epochs}] "
              f"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}% | "
              f"Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.2f}%")

    writer.close()
    return model

def check_dataset(mat_files):
    """Visual sanity check: plot three randomly chosen cached samples.

    Loads (or builds) the cached dataset, prints its shapes, and shows each
    selected sample's channels over time with its label in the title.
    """
    X, y = preprocess_and_cache(mat_files)
    print(f"Dataset shape: {X.shape}, Labels shape: {y.shape}")

    picks = random.sample(range(X.shape[0]), 3)
    fig, axes = plt.subplots(3, 1, figsize=(12, 8))
    for ax, idx in zip(axes, picks):
        ax.plot(X[idx].cpu().numpy().T)  # transpose so time runs along x
        ax.set_title(f"Sample {idx} - Label: {y[idx].item()}")
        ax.set_ylabel("EMG Value")
        ax.set_xlabel("Time")
    plt.tight_layout()
    plt.show()

# --------------------------
# Usage example
# --------------------------
# --------------------------
# Script entry point
# --------------------------
if __name__ == "__main__":
    # Subjects S1..S10, exercises E1..E3 of acquisition A1 — generated
    # instead of 30 hand-written path strings (same paths, same order).
    mat_files = [
        rf"D:\project\muscle_net\dino\S{s}_A1_E{e}.mat"
        for s in range(1, 11)
        for e in range(1, 4)
    ]

    # check_dataset(mat_files)  # optional: visualize a few samples instead
    train_model(mat_files, epochs=50, device="cuda")
