import os
import json
import argparse
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

from model import STSupervised

# Repository root: the parent of this script's directory. Used below as the
# base for the default --config path.
PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))


def derive_round_dir(cfg):
    """Build the round-specific output directory path from a config dict.

    Layout:
        <output_dir>/<dataset>/<feature>-tw-<tw>ol-<ol>/<mode>/<split>/round_<i>

    ``round_index`` defaults to 1 when absent from *cfg*.
    """
    feature_part = f"{cfg['feature_type']}-tw-{cfg['time_window']}ol-{cfg['overlap']}"
    round_part = f"round_{int(cfg.get('round_index', 1))}"
    parts = (
        cfg["output_dir"],
        cfg["dataset"],
        feature_part,
        cfg["experiment_mode"],
        cfg["split_type"],
        round_part,
    )
    return os.path.join(*parts)


def load_split(split_dir, name):
    """Load a saved split ``<name>.pt`` and return (samples, labels, trial_ids).

    ``trial_ids`` may be absent from the saved dict, in which case None is
    returned. Tensors are mapped to CPU regardless of where they were saved.
    """
    path = os.path.join(split_dir, f"{name}.pt")
    payload = torch.load(path, map_location='cpu')
    return payload["samples"], payload["labels"], payload.get("trial_ids")


def make_loader(x, y, batch_size, shuffle=True):
    """Wrap (x, y) tensors in a DataLoader; memory is pinned for faster host-to-device copies."""
    dataset = TensorDataset(x, y)
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        pin_memory=True,
    )
    return loader


def accuracy(pred, true):
    """Return the fraction of positions where *pred* equals *true* as a float."""
    return pred.eq(true).float().mean().item()


def f1_macro(pred, true):
    """Macro-averaged F1 over classes 0..max(label) seen in *pred* or *true*.

    A class with zero predicted or zero actual samples contributes an F1 of
    0.0 to the average.
    """
    k_max = int(max(true.max().item(), pred.max().item())) + 1
    scores = []
    for cls in range(k_max):
        is_pred = pred == cls
        is_true = true == cls
        tp = (is_pred & is_true).sum().item()
        fp = (is_pred & ~is_true).sum().item()
        fn = (~is_pred & is_true).sum().item()
        precision = tp / (tp + fp) if tp + fp > 0 else 0.0
        recall = tp / (tp + fn) if tp + fn > 0 else 0.0
        if precision + recall > 0:
            scores.append(2 * precision * recall / (precision + recall))
        else:
            scores.append(0.0)
    return float(np.mean(scores))


def trial_vote(pred, true, trial_ids):
    """Collapse segment-level predictions/labels into per-trial majority votes.

    Returns (None, None) when *trial_ids* is None. Ties resolve to the
    smallest class index (``np.bincount(...).argmax()`` behaviour).
    """
    if trial_ids is None:
        return None, None
    pred_np = pred.cpu().numpy()
    true_np = true.cpu().numpy()
    if torch.is_tensor(trial_ids):
        ids = trial_ids.cpu().numpy()
    else:
        ids = np.array(trial_ids)
    voted_pred = []
    voted_true = []
    for tid in np.unique(ids):
        mask = ids == tid
        voted_pred.append(np.bincount(pred_np[mask]).argmax())
        voted_true.append(np.bincount(true_np[mask]).argmax())
    return torch.tensor(voted_pred), torch.tensor(voted_true)


def train_one_subject(device, base_dir, sub_name, args, model_kwargs):
    """Train and evaluate a supervised model for one subject.

    Loads the subject's splits from ``<base_dir>/by_subject/<sub_name>``,
    trains for ``args.epochs`` epochs, checkpoints the best model (selected by
    validation accuracy when a val split exists, otherwise by train accuracy),
    then evaluates the best checkpoint on the test split.

    Returns:
        (seg_acc, seg_f1, tri_acc, tri_f1) — segment-level accuracy/macro-F1,
        plus trial-level accuracy/macro-F1 after majority voting, or None for
        the trial metrics when the test split carries no trial ids.
    """
    sub_dir = os.path.join(base_dir, 'by_subject', sub_name)
    train_x, train_y, _ = load_split(sub_dir, 'train')
    # The validation split is optional; when absent, model selection falls
    # back to train accuracy (see the epoch loop below).
    val_x, val_y, _ = (None, None, None)
    if os.path.isfile(os.path.join(sub_dir, 'val.pt')):
        val_x, val_y, _ = load_split(sub_dir, 'val')
    test_x, test_y, test_trial_ids = load_split(sub_dir, 'test')

    # Samples are indexed as (N, channels, ...) — dim 1 is fed to the model as
    # in_channels; assumed (N, channels, time) — TODO confirm upstream.
    c = int(train_x.shape[1])
    num_classes = int(max(train_y.max().item(), test_y.max().item()) + 1)

    model = STSupervised(
        in_channels=c,
        num_classes=num_classes,
        d=args.d, k=args.k, s=args.s,
        proj_dim=args.proj_dim,
        attn_hidden=args.attn_hidden,
        temp_hidden=args.temp_hidden,
        temp_layers=args.temp_layers,
        dropout=args.dropout,
        **model_kwargs,
    ).to(device)

    opt = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    criterion = nn.CrossEntropyLoss()

    train_loader = make_loader(train_x.float(), train_y.long(), args.batch_size, True)
    val_loader = make_loader(val_x.float(), val_y.long(), args.batch_size, False) if val_x is not None else None
    test_loader = make_loader(test_x.float(), test_y.long(), args.batch_size, False)

    best_acc = 0.0
    save_dir = os.path.join(os.path.dirname(__file__), 'save_subject')
    os.makedirs(save_dir, exist_ok=True)
    best_path = os.path.join(save_dir, f'{sub_name}_best.pth')

    for epoch in range(1, args.epochs + 1):
        model.train()
        tp, ta = 0, 0
        tr_loss_sum = 0.0
        for xb, yb in train_loader:
            xb = xb.to(device)
            yb = yb.to(device)
            opt.zero_grad(set_to_none=True)
            # Model returns (logits, aux); only logits are used for the loss.
            logits, _ = model(xb)
            loss = criterion(logits, yb)
            loss.backward()
            opt.step()
            tp += (logits.argmax(1) == yb).sum().item()
            ta += yb.size(0)
            tr_loss_sum += loss.item() * yb.size(0)
        tr_acc = tp / max(1, ta)
        tr_loss = tr_loss_sum / max(1, ta)

        if val_loader is not None:
            model.eval()
            vp, va = 0, 0
            val_loss_sum = 0.0
            with torch.no_grad():
                for xb, yb in val_loader:
                    xb = xb.to(device)
                    yb = yb.to(device)
                    logits, _ = model(xb)
                    vloss = criterion(logits, yb)
                    vp += (logits.argmax(1) == yb).sum().item()
                    va += yb.size(0)
                    val_loss_sum += vloss.item() * yb.size(0)
            val_acc = vp / max(1, va)
            val_loss = val_loss_sum / max(1, va)
            print(f"{sub_name} epoch {epoch:03d} | tr_loss={tr_loss:.4f} tr_acc={tr_acc:.4f} | val_loss={val_loss:.4f} val_acc={val_acc:.4f}")
            if val_acc > best_acc:
                best_acc = val_acc
                torch.save(model.state_dict(), best_path)
        else:
            print(f"{sub_name} epoch {epoch:03d} | tr_loss={tr_loss:.4f} tr_acc={tr_acc:.4f}")
            if tr_acc > best_acc:
                best_acc = tr_acc
                torch.save(model.state_dict(), best_path)

    # Restore the best checkpoint if one was ever saved; otherwise evaluate
    # the last-epoch weights.
    if os.path.isfile(best_path):
        model.load_state_dict(torch.load(best_path, map_location=device))

    model.eval()
    ps, ys = [], []
    test_loss_sum = 0.0
    with torch.no_grad():
        for xb, yb in test_loader:
            xb = xb.to(device)
            logits, _ = model(xb)
            tloss = criterion(logits, yb.to(device))
            ps.append(logits.argmax(1).cpu())
            ys.append(yb)
            test_loss_sum += tloss.item() * yb.size(0)
    ps = torch.cat(ps, 0)
    ys = torch.cat(ys, 0)

    seg_acc = accuracy(ps, ys)
    seg_f1 = f1_macro(ps, ys)
    test_loss = test_loss_sum / max(1, ys.size(0))
    print(f"{sub_name} TEST: loss={test_loss:.4f} seg_acc={seg_acc:.4f} seg_f1={seg_f1:.4f}")

    # Trial-level metrics via majority vote over each trial's segments;
    # skipped entirely when the split has no trial ids.
    tri_pred, tri_true = trial_vote(ps, ys, test_trial_ids)
    tri_acc, tri_f1 = None, None
    if tri_pred is not None:
        tri_acc = accuracy(tri_pred, tri_true)
        tri_f1 = f1_macro(tri_pred, tri_true)

    return seg_acc, seg_f1, tri_acc, tri_f1


def main():
    """Entry point: train one model per subject, then report averaged metrics."""
    arg_parser = argparse.ArgumentParser()
    # Data / run configuration.
    arg_parser.add_argument('--config', type=str, default=os.path.join(PROJECT_ROOT, 'Preprocess_data', 'configs', 'deap_config.json'))
    arg_parser.add_argument('--epochs', type=int, default=200)
    arg_parser.add_argument('--batch_size', type=int, default=64)
    arg_parser.add_argument('--lr', type=float, default=1e-3)
    arg_parser.add_argument('--dropout', type=float, default=0.5)
    # Core model hyperparameters.
    arg_parser.add_argument('--k', type=int, default=9)
    arg_parser.add_argument('--s', type=int, default=1)
    arg_parser.add_argument('--d', type=int, default=4)
    arg_parser.add_argument('--proj_dim', type=int, default=64)
    arg_parser.add_argument('--attn_hidden', type=int, default=32)
    arg_parser.add_argument('--temp_hidden', type=int, default=128)
    arg_parser.add_argument('--temp_layers', type=int, default=3)
    arg_parser.add_argument('--weight_decay', type=float, default=0.0)
    arg_parser.add_argument('--gpu', type=str, default='0')
    # Branch toggles and branch-specific sizes.
    arg_parser.add_argument('--use_spatial', action='store_true')
    arg_parser.add_argument('--use_temporal', action='store_true')
    arg_parser.add_argument('--use_spatial_gcn', action='store_true')
    arg_parser.add_argument('--tscc_k', type=int, default=7)
    arg_parser.add_argument('--tscc_h1', type=int, default=64)
    arg_parser.add_argument('--tscc_h2', type=int, default=128)
    arg_parser.add_argument('--tscc_h3', type=int, default=128)
    arg_parser.add_argument('--spatial_out', type=int, default=32)
    args = arg_parser.parse_args()

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    with open(args.config, 'r', encoding='utf-8') as f:
        config = json.load(f)
    round_dir = derive_round_dir(config)
    by_sub_dir = os.path.join(round_dir, 'by_subject')
    subjects = []
    for entry in sorted(os.listdir(by_sub_dir)):
        if os.path.isdir(os.path.join(by_sub_dir, entry)):
            subjects.append(entry)

    use_spatial = bool(args.use_spatial)
    # If no branch flag was given explicitly, default to enabling the temporal
    # branch only; the spatial GCN stays off by default.
    explicit = args.use_spatial or args.use_temporal or args.use_spatial_gcn
    if explicit:
        use_temporal = bool(args.use_temporal)
        use_spatial_gcn = bool(args.use_spatial_gcn)
    else:
        use_temporal = True
        use_spatial_gcn = False
    model_kwargs = {
        'use_spatial': use_spatial,
        'use_temporal': use_temporal,
        'use_spatial_gcn': use_spatial_gcn,
        'tscc_k': args.tscc_k,
        'tscc_h1': args.tscc_h1,
        'tscc_h2': args.tscc_h2,
        'tscc_h3': args.tscc_h3,
        'spatial_out': args.spatial_out,
    }

    all_seg_acc = []
    all_seg_f1 = []
    all_tri_acc = []
    all_tri_f1 = []
    for s in subjects:
        seg_acc, seg_f1, tri_acc, tri_f1 = train_one_subject(device, round_dir, s, args, model_kwargs)
        all_seg_acc.append(seg_acc)
        all_seg_f1.append(seg_f1)
        if tri_acc is not None:
            all_tri_acc.append(tri_acc)
            all_tri_f1.append(tri_f1)
        print(f"{s}: seg_acc={seg_acc:.4f}, seg_f1={seg_f1:.4f}, tri_acc={None if tri_acc is None else f'{tri_acc:.4f}'}, tri_f1={None if tri_f1 is None else f'{tri_f1:.4f}'}")

    print(f"SEGMENT-WISE: acc={np.mean(all_seg_acc):.4f} f1={np.mean(all_seg_f1):.4f}")
    if len(all_tri_acc) > 0:
        print(f"TRIAL-WISE: acc={np.mean(all_tri_acc):.4f} f1={np.mean(all_tri_f1):.4f}")


if __name__ == '__main__':
    main()
