import os
import sys
import json
import argparse
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

# Repository root: two directories above this file (used for the default --config path).
PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))

# Project-local model definition; assumed importable from the working directory
# or an already-configured sys.path — TODO confirm (no sys.path setup is visible here).
from model import STSupervised


def derive_round_dir(cfg):
    """Build the per-round data directory path from a preprocessing config dict.

    Layout: <output_dir>/<dataset>/<feature>-tw-<tw>ol-<ol>/<mode>/<split>/round_<i>,
    where the round index defaults to 1 when absent from cfg.
    """
    feature_part = f"{cfg['feature_type']}-tw-{cfg['time_window']}ol-{cfg['overlap']}"
    round_part = f"round_{int(cfg.get('round_index', 1))}"
    return os.path.join(
        cfg["output_dir"],
        cfg["dataset"],
        feature_part,
        cfg["experiment_mode"],
        cfg["split_type"],
        round_part,
    )


def load_split(split_dir, name):
    """Load the '<name>.pt' split file from split_dir.

    The file is expected to hold a dict with 'samples' and 'labels' tensors;
    both are mapped onto CPU. Returns the (samples, labels) pair.
    """
    payload = torch.load(os.path.join(split_dir, f"{name}.pt"), map_location='cpu')
    return payload["samples"], payload["labels"]


def make_loader(x, y, batch_size, shuffle=True, num_workers=0):
    """Wrap paired (x, y) tensors in a pinned-memory DataLoader."""
    dataset = TensorDataset(x, y)
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        pin_memory=True,
        num_workers=num_workers,
    )
    return loader


def _run_train_epoch(model, loader, opt, criterion, device):
    """Run one optimization pass over loader; return (mean_loss, accuracy)."""
    model.train()
    correct, total, loss_sum = 0, 0, 0.0
    for xb, yb in loader:
        xb = xb.to(device)
        yb = yb.to(device)
        opt.zero_grad(set_to_none=True)
        logits, _ = model(xb)
        loss = criterion(logits, yb)
        loss.backward()
        opt.step()
        correct += (logits.argmax(1) == yb).sum().item()
        total += yb.size(0)
        loss_sum += loss.item() * yb.size(0)
    return loss_sum / max(1, total), correct / max(1, total)


def _evaluate(model, loader, criterion, device):
    """Evaluate model on loader without gradients.

    Returns (mean_loss, accuracy, preds, targets), with preds/targets as
    concatenated CPU tensors.
    """
    model.eval()
    preds, targets = [], []
    loss_sum, total = 0.0, 0
    with torch.no_grad():
        for xb, yb in loader:
            xb = xb.to(device)
            yb_dev = yb.to(device)
            logits, _ = model(xb)
            loss = criterion(logits, yb_dev)
            loss_sum += loss.item() * yb.size(0)
            total += yb.size(0)
            preds.append(logits.argmax(1).cpu())
            targets.append(yb.cpu())
    preds = torch.cat(preds, 0)
    targets = torch.cat(targets, 0)
    acc = (preds == targets).float().mean().item()
    return loss_sum / max(1, total), acc, preds, targets


def main():
    """Train STSupervised on preprocessed splits, checkpoint the best model, test it.

    Reads split tensors from the directory derived from the JSON config,
    selects the best epoch by validation accuracy (training accuracy when no
    val split exists), then reports test loss/accuracy and a confusion matrix.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default=os.path.join(PROJECT_ROOT, 'Preprocess_data', 'configs', 'deap_config.json'))
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--d', type=int, default=4)
    parser.add_argument('--k', type=int, default=9)
    parser.add_argument('--s', type=int, default=1)
    parser.add_argument('--proj_dim', type=int, default=64)
    parser.add_argument('--attn_hidden', type=int, default=32)
    parser.add_argument('--temp_hidden', type=int, default=128)
    parser.add_argument('--temp_layers', type=int, default=3)
    parser.add_argument('--weight_decay', type=float, default=0.0)
    parser.add_argument('--num_workers', type=int, default=0)
    parser.add_argument('--gpu', type=str, default='0')
    parser.add_argument('--use_spatial', action='store_true', help='启用空间分支')
    parser.add_argument('--use_temporal', action='store_true', help='启用时序分支（若两者都不传，默认仅时序）')
    parser.add_argument('--use_spatial_gcn', action='store_true', help='启用空间GCN分支')
    args = parser.parse_args()

    # Restrict visible GPUs before the first CUDA query.
    if len(args.gpu) > 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    with open(args.config, 'r', encoding='utf-8') as f:
        cfg = json.load(f)
    round_dir = derive_round_dir(cfg)
    print(f"Training data directory: {round_dir}")

    train_x, train_y = load_split(round_dir, 'train')
    val_x, val_y = (None, None)
    val_path = os.path.join(round_dir, 'val.pt')
    if os.path.isfile(val_path):
        val_x, val_y = load_split(round_dir, 'val')
    test_x, test_y = load_split(round_dir, 'test')

    # Input channel count; assumes samples are shaped (N, C, T) — TODO confirm.
    c = int(train_x.shape[1])
    # Class count must cover every split. The original ignored val labels,
    # which would make CrossEntropyLoss fail on a class seen only in val.
    label_maxes = [train_y.max().item(), test_y.max().item()]
    if val_y is not None:
        label_maxes.append(val_y.max().item())
    num_classes = int(max(label_maxes) + 1)

    # If no branch flag is passed explicitly, default to spatial(EEGNet)+temporal;
    # the spatial GCN branch stays off by default.
    if (args.use_spatial or args.use_temporal or args.use_spatial_gcn):
        use_spatial = bool(args.use_spatial)
        use_temporal = bool(args.use_temporal)
        use_spatial_gcn = bool(args.use_spatial_gcn)
    else:
        use_spatial = True
        use_temporal = True
        use_spatial_gcn = False

    model = STSupervised(
        in_channels=c,
        num_classes=num_classes,
        d=args.d, k=args.k, s=args.s,
        proj_dim=args.proj_dim,
        attn_hidden=args.attn_hidden,
        temp_hidden=args.temp_hidden,
        temp_layers=args.temp_layers,
        dropout=args.dropout,
        use_spatial=use_spatial,
        use_temporal=use_temporal,
        use_spatial_gcn=use_spatial_gcn,
    ).to(device)

    opt = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    criterion = nn.CrossEntropyLoss()

    train_loader = make_loader(train_x.float(), train_y.long(), args.batch_size, True, args.num_workers)
    val_loader = make_loader(val_x.float(), val_y.long(), args.batch_size, False, args.num_workers) if val_x is not None else None
    test_loader = make_loader(test_x.float(), test_y.long(), args.batch_size, False, args.num_workers)

    save_dir = os.path.join(os.path.dirname(__file__), 'save')
    os.makedirs(save_dir, exist_ok=True)
    best_path = os.path.join(save_dir, 'best.pth')
    best_acc = -1.0

    for epoch in range(1, args.epochs + 1):
        tr_loss, tr_acc = _run_train_epoch(model, train_loader, opt, criterion, device)

        if val_loader is not None:
            val_loss, val_acc, _, _ = _evaluate(model, val_loader, criterion, device)
            print(f"epoch {epoch:03d} | tr_loss={tr_loss:.4f} tr_acc={tr_acc:.4f} | val_loss={val_loss:.4f} val_acc={val_acc:.4f}")
            score = val_acc
        else:
            print(f"epoch {epoch:03d} | tr_loss={tr_loss:.4f} tr_acc={tr_acc:.4f}")
            score = tr_acc
        # Checkpoint on best val accuracy, or best train accuracy without a val split.
        if score > best_acc:
            best_acc = score
            torch.save(model.state_dict(), best_path)

    if os.path.isfile(best_path):
        model.load_state_dict(torch.load(best_path, map_location=device))

    test_loss, acc, ps, ys = _evaluate(model, test_loader, criterion, device)
    print(f"Test loss={test_loss:.4f} acc={acc:.4f}")

    # Confusion matrix (rows=true, cols=pred). Size it by the largest label in
    # EITHER tensor: the original used ys.max() only, so a prediction above the
    # highest true label raised an index error. bincount vectorizes the fill.
    n_cls = int(max(ys.max().item(), ps.max().item()) + 1)
    cm = torch.bincount(ys * n_cls + ps, minlength=n_cls * n_cls).reshape(n_cls, n_cls)
    print("Confusion matrix (rows=true, cols=pred):")
    for row in cm.tolist():
        print(" ".join(str(int(v)) for v in row))


# Entry point when the file is executed directly as a training script.
if __name__ == '__main__':
    main()