import os
import json
import argparse
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
from model import TSception

def set_seed(seed):
    """Seed the NumPy and PyTorch RNGs and force deterministic cuDNN kernels."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

def derive_save_dir(cfg):
    """Build the preprocessed-data directory path encoded by a config dict.

    Layout: output_dir/dataset/<feature-tw-ol tag>/experiment_mode/split_type/round_K,
    where K defaults to 1 when 'round_index' is absent.
    """
    feature_tag = f"{cfg['feature_type']}-tw-{cfg['time_window']}ol-{cfg['overlap']}"
    round_tag = f"round_{int(cfg.get('round_index', 1))}"
    return os.path.join(
        cfg["output_dir"],
        cfg["dataset"],
        feature_tag,
        cfg["experiment_mode"],
        cfg["split_type"],
        round_tag,
    )

def load_split(split_dir, name):
    """Load one split file ``<name>.pt`` from *split_dir*.

    Returns ``(samples, labels, trial_ids)``; ``trial_ids`` is ``None``
    when the saved dict has no such key.
    """
    payload = torch.load(os.path.join(split_dir, f"{name}.pt"))
    return payload["samples"], payload["labels"], payload.get("trial_ids")

def prepare_x(x):
    """Bring a feature tensor into the model's expected (N, 1, C, T) layout.

    Accepts either:
      * 4-D input ``(n, s, c, b)`` — n samples, s segments, c channels,
        b feature bands: segments and bands are concatenated into a
        per-channel axis of length ``s * b``;
      * 3-D input ``(n, c, t)`` — already channels-first, used as-is.

    Returns the tensor with a singleton dim inserted at axis 1: ``(n, 1, c, t)``.
    Raises ValueError for any other rank.
    """
    if x.dim() == 4:
        n, s, c, b = x.shape
        # Bug fix: the previous ``x.view(n, c, s * b)`` reinterpreted the
        # row-major buffer of an (n, s, c, b) tensor, interleaving segments
        # across channels. Move the channel axis in front first so each
        # channel keeps its own segments/bands contiguously.
        x = x.permute(0, 2, 1, 3).reshape(n, c, s * b)
    elif x.dim() != 3:
        raise ValueError("unsupported input shape")
    return x.unsqueeze(1)

def make_loader(x, y, batch_size, shuffle=True):
    """Wrap tensors *x*, *y* in a pinned-memory DataLoader."""
    return DataLoader(
        TensorDataset(x, y),
        batch_size=batch_size,
        shuffle=shuffle,
        pin_memory=True,
    )

def accuracy(pred, true):
    """Fraction of positions where *pred* equals *true*, as a Python float."""
    matches = torch.eq(pred, true).float()
    return matches.mean().item()

def f1_macro(pred, true):
    """Macro-averaged F1 over classes 0..max(true).

    Classes are enumerated from the ground-truth labels only; a class with
    no support and no predictions contributes an F1 of 0.0.
    """
    per_class = []
    for cls in range(int(true.max().item() + 1)):
        hits = ((pred == cls) & (true == cls)).sum().item()
        predicted = (pred == cls).sum().item()   # tp + fp
        actual = (true == cls).sum().item()      # tp + fn
        precision = hits / predicted if predicted > 0 else 0.0
        recall = hits / actual if actual > 0 else 0.0
        if precision + recall > 0:
            per_class.append(2 * precision * recall / (precision + recall))
        else:
            per_class.append(0.0)
    return float(np.mean(per_class))

def trial_vote(pred, true, trial_ids):
    """Collapse segment-level predictions to one label per trial by majority vote.

    Returns ``(trial_pred, trial_true)`` as CPU tensors ordered by ascending
    trial id, or ``(None, None)`` when no trial ids are available. Ties
    resolve to the smallest label (np.bincount().argmax() behavior).
    """
    if trial_ids is None:
        return None, None
    seg_pred = pred.cpu().numpy()
    seg_true = true.cpu().numpy()
    if torch.is_tensor(trial_ids):
        ids = trial_ids.cpu().numpy()
    else:
        ids = np.array(trial_ids)
    voted_pred, voted_true = [], []
    for tid in np.unique(ids):
        mask = ids == tid
        voted_pred.append(np.bincount(seg_pred[mask]).argmax())
        voted_true.append(np.bincount(seg_true[mask]).argmax())
    return torch.tensor(voted_pred), torch.tensor(voted_true)

def train_one_subject(device, base_dir, sub_name, args):
    """Train and evaluate a TSception model for one subject.

    Loads this subject's train / (optional) val / test splits from
    ``base_dir/by_subject/<sub_name>``, trains for ``args.epochs`` epochs,
    checkpoints the best model (selected by validation accuracy when a val
    split exists, otherwise by training accuracy), then evaluates the best
    checkpoint on the test split.

    Returns:
        (seg_acc, seg_f1, tri_acc, tri_f1) — segment-level accuracy/macro-F1,
        plus trial-level metrics from majority voting, which are None when
        the test split has no trial ids.
    """
    sub_dir = os.path.join(base_dir, "by_subject", sub_name)
    train_x, train_y, _ = load_split(sub_dir, 'train')
    # The validation split is optional; when absent, model selection falls
    # back to training accuracy (see the epoch loop below).
    val_x, val_y, _ = (None, None, None)
    if os.path.isfile(os.path.join(sub_dir, 'val.pt')):
        val_x, val_y, _ = load_split(sub_dir, 'val')
    test_x, test_y, test_trial_ids = load_split(sub_dir, 'test')

    # Reshape every split to the (N, 1, C, T) layout the model consumes.
    train_x = prepare_x(train_x)
    if val_x is not None:
        val_x = prepare_x(val_x)
    test_x = prepare_x(test_x)

    # Class count from train+test label maxima; NOTE(review): val labels are
    # not consulted — confirm val cannot contain an otherwise-unseen class.
    num_classes = int(max(train_y.max().item(), test_y.max().item()) + 1)
    c = train_x.shape[2]  # channel count
    t = train_x.shape[3]  # per-channel time/feature length
    sr = 128  # NOTE(review): hard-coded sampling rate — confirm it matches the dataset.

    # NOTE(review): num_S reuses args.T, tying the spatial kernel count to the
    # temporal one — confirm this is intended rather than a separate flag.
    model = TSception(num_classes=num_classes, input_size=(1, c, t), sampling_rate=sr, num_T=args.T, num_S=args.T, hidden=args.hidden, dropout_rate=args.dropout)
    model = model.to(device)
    opt = torch.optim.Adam(model.parameters(), lr=args.lr)
    criterion = nn.CrossEntropyLoss()

    train_loader = make_loader(train_x, train_y.long(), args.batch_size, True)
    val_loader = make_loader(val_x, val_y.long(), args.batch_size, False) if val_x is not None else None
    test_loader = make_loader(test_x, test_y.long(), args.batch_size, False)

    # Best checkpoint bookkeeping; checkpoints land next to this script.
    best_acc = 0.0
    save_dir = os.path.join(os.path.dirname(__file__), 'save_subject')
    os.makedirs(save_dir, exist_ok=True)
    best_path = os.path.join(save_dir, f'{sub_name}_best.pth')

    for epoch in range(1, args.epochs + 1):
        model.train()
        tp, ta = 0, 0  # running correct-count and sample-count for train accuracy
        for xb, yb in train_loader:
            xb = xb.to(device)
            yb = yb.to(device)
            opt.zero_grad()
            out = model(xb)
            loss = criterion(out, yb)
            loss.backward()
            opt.step()
            tp += (out.argmax(1) == yb).sum().item()
            ta += yb.size(0)
        tr_acc = tp / ta

        if val_loader is not None:
            # Select on validation accuracy when a val split exists.
            model.eval()
            vp, va = 0, 0
            with torch.no_grad():
                for xb, yb in val_loader:
                    xb = xb.to(device)
                    yb = yb.to(device)
                    out = model(xb)
                    vp += (out.argmax(1) == yb).sum().item()
                    va += yb.size(0)
            val_acc = vp / va
            if val_acc > best_acc:
                best_acc = val_acc
                torch.save(model.state_dict(), best_path)
        else:
            # No val split: select on training accuracy instead.
            if tr_acc > best_acc:
                best_acc = tr_acc
                torch.save(model.state_dict(), best_path)

    # Restore the best checkpoint (if any epoch improved on best_acc=0.0)
    # before evaluating on the held-out test split.
    if os.path.isfile(best_path):
        model.load_state_dict(torch.load(best_path, map_location=device))

    model.eval()
    ps, ys = [], []
    with torch.no_grad():
        for xb, yb in test_loader:
            xb = xb.to(device)
            out = model(xb)
            ps.append(out.argmax(1).cpu())
            ys.append(yb)
    ps = torch.cat(ps, 0)
    ys = torch.cat(ys, 0)

    seg_acc = accuracy(ps, ys)
    seg_f1 = f1_macro(ps, ys)

    # Trial-level metrics via majority vote; None when trial ids are missing.
    tri_pred, tri_true = trial_vote(ps, ys, test_trial_ids)
    tri_acc, tri_f1 = None, None
    if tri_pred is not None:
        tri_acc = accuracy(tri_pred, tri_true)
        tri_f1 = f1_macro(tri_pred, tri_true)

    return seg_acc, seg_f1, tri_acc, tri_f1

def main():
    """CLI entry point: train one model per subject and print averaged metrics."""
    parser = argparse.ArgumentParser()
    default_cfg = os.path.join(
        os.path.dirname(os.path.dirname(__file__)),
        'Preprocess_data', 'configs', 'deap_config.json',
    )
    parser.add_argument('--config', type=str, default=default_cfg)
    parser.add_argument('--epochs', type=int, default=500)
    parser.add_argument('--batch-size', type=int, default=64)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--T', type=int, default=15)
    parser.add_argument('--hidden', type=int, default=32)
    parser.add_argument('--gpu', type=str, default='')
    parser.add_argument('--seed', type=int, default=2021)
    args = parser.parse_args()

    set_seed(args.seed)
    # Export the device mask before any CUDA query so it takes effect.
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    use_cuda = torch.cuda.is_available() and len(args.gpu) > 0
    device = torch.device('cuda' if use_cuda else 'cpu')

    with open(args.config, 'r', encoding='utf-8') as f:
        cfg = json.load(f)
    base_dir = derive_save_dir(cfg)
    by_sub = os.path.join(base_dir, 'by_subject')
    subs = sorted(
        d for d in os.listdir(by_sub)
        if d.startswith('sub') and os.path.isdir(os.path.join(by_sub, d))
    )

    seg_accs, seg_f1s, tri_accs, tri_f1s = [], [], [], []
    for sub in subs:
        seg_acc, seg_f1, tri_acc, tri_f1 = train_one_subject(device, base_dir, sub, args)
        seg_accs.append(seg_acc)
        seg_f1s.append(seg_f1)
        if tri_acc is not None:
            tri_accs.append(tri_acc)
            tri_f1s.append(tri_f1)
        # Format trial metrics only when present, mirroring the tuple's None markers.
        tri_acc_txt = None if tri_acc is None else f'{tri_acc:.4f}'
        tri_f1_txt = None if tri_f1 is None else f'{tri_f1:.4f}'
        print(f"{sub}: seg_acc={seg_acc:.4f}, seg_f1={seg_f1:.4f}, tri_acc={tri_acc_txt}, tri_f1={tri_f1_txt}")

    print(f"SEGMENT-WISE: acc={np.mean(seg_accs):.4f} f1={np.mean(seg_f1s):.4f}")
    if tri_accs:
        print(f"TRIAL-WISE: acc={np.mean(tri_accs):.4f} f1={np.mean(tri_f1s):.4f}")

if __name__ == '__main__':
    main()
