import os
import sys
import json
import argparse
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

# Make the repository root importable so `channelnet` resolves when this
# script is executed directly rather than installed as a package.
PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))
if PROJECT_ROOT not in sys.path:
    sys.path.insert(0, PROJECT_ROOT)

# Import ChannelNet
from channelnet.config import EEGModelConfig
from channelnet.model import ChannelNetModel


def derive_round_dir(cfg):
    """Build the per-round data directory path from a preprocessing config dict.

    Layout: output_dir/dataset/<feature-tw-ol tag>/experiment_mode/split_type/round_K,
    where K defaults to 1 when ``round_index`` is absent.
    """
    round_idx = int(cfg.get('round_index', 1))
    feature_tag = f"{cfg['feature_type']}-tw-{cfg['time_window']}ol-{cfg['overlap']}"
    parts = [
        cfg["output_dir"],
        cfg["dataset"],
        feature_tag,
        cfg["experiment_mode"],
        cfg["split_type"],
        f"round_{round_idx}",
    ]
    return os.path.join(*parts)


def load_split(split_dir, name):
    """Load the saved split file ``<name>.pt`` and return (samples, labels) on CPU."""
    path = os.path.join(split_dir, f"{name}.pt")
    payload = torch.load(path, map_location='cpu')
    return payload["samples"], payload["labels"]


def prepare_x(x: torch.Tensor) -> torch.Tensor:
    """Normalize inputs to (N, C, T) layout.

    A 3-D tensor (N, C, T) is returned untouched; a 4-D tensor (N, S, C, B)
    has its segment and band axes merged into a single time axis, giving
    (N, C, S*B). Any other rank raises ValueError.
    """
    ndim = x.dim()
    if ndim == 3:
        return x
    if ndim == 4:
        n, s, c, b = x.shape
        channel_first = x.permute(0, 2, 1, 3).contiguous()
        return channel_first.view(n, c, s * b)
    raise ValueError(f"Unsupported input shape: {tuple(x.shape)}")


def make_loader(x, y, batch_size, shuffle=True, num_workers=0):
    """Wrap (x, y) tensors in a pinned-memory DataLoader."""
    dataset = TensorDataset(x, y)
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        pin_memory=True,
        num_workers=num_workers,
    )
    return loader


def accuracy_from_logits(logits: torch.Tensor, y: torch.Tensor) -> float:
    """Fraction of rows whose argmax over the class dimension matches the label."""
    with torch.no_grad():
        predicted = torch.argmax(logits, dim=1)
        hits = (predicted == y).float()
        return hits.mean().item()


def accuracy(pred: torch.Tensor, true: torch.Tensor) -> float:
    """Element-wise match rate between predicted and true labels."""
    matches = torch.eq(pred, true).float()
    return matches.mean().item()


def f1_macro(pred: torch.Tensor, true: torch.Tensor) -> float:
    """Macro-averaged F1 over all classes seen in labels or predictions.

    Classes are assumed to be contiguous integers starting at 0. The class
    count is taken as max(pred, true) + 1 so that predictions of classes
    absent from the labels contribute a zero F1 to the mean (matching
    sklearn's macro averaging) instead of being silently ignored.
    """
    # Bug fix: the original derived num_classes from `true` alone, so any
    # class predicted but never labeled was dropped from the macro average.
    num_classes = int(max(pred.max().item(), true.max().item()) + 1)
    f1s = []
    for k in range(num_classes):
        tp = ((pred == k) & (true == k)).sum().item()
        fp = ((pred == k) & (true != k)).sum().item()
        fn = ((pred != k) & (true == k)).sum().item()
        # Guard each ratio against division by zero (class absent / never predicted).
        p = tp / (tp + fp) if (tp + fp) > 0 else 0.0
        r = tp / (tp + fn) if (tp + fn) > 0 else 0.0
        f1 = 2 * p * r / (p + r) if (p + r) > 0 else 0.0
        f1s.append(f1)
    return float(np.mean(f1s))


def confusion_matrix(pred: torch.Tensor, true: torch.Tensor, num_classes: int) -> torch.Tensor:
    """Confusion matrix of shape (num_classes, num_classes); rows = true, cols = pred.

    Vectorized replacement for the original per-sample Python loop: each
    (true, pred) pair is encoded as a single flat index, histogrammed with
    one torch.bincount call, then reshaped into the matrix.
    """
    true_flat = true.view(-1).long()
    pred_flat = pred.view(-1).long()
    flat_idx = true_flat * num_classes + pred_flat
    counts = torch.bincount(flat_idx, minlength=num_classes * num_classes)
    return counts.view(num_classes, num_classes).to(torch.long)


def build_model(c: int, t: int, num_classes: int, device: torch.device, args) -> ChannelNetModel:
    """Construct a ChannelNet model for inputs with `c` channels and `t` time steps."""
    # Optional CLI overrides with the same defaults as the argument parser.
    overrides = {
        name: getattr(args, name, default)
        for name, default in (
            ('temp_channels', 10),
            ('out_channels', 50),
            ('embedding_size', 256),
        )
    }
    cfg = EEGModelConfig(
        in_channels=1,
        num_classes=num_classes,
        input_width=int(t),
        input_height=int(c),
        # Safer settings to avoid tiny spatial height causing 3x3 conv invalid
        num_residual_blocks=0,
        down_kernel=1,
        down_stride=1,
        spatial_stride=(1, 1),
        # Keep other hyperparameters as defaults (temporal layers, strides, dilations etc.)
        **overrides,
    )
    return ChannelNetModel(config=cfg).to(device)


def main() -> None:
    """Train ChannelNet on a preprocessed EEG split and report test metrics.

    Workflow: parse CLI args -> load the preprocessing config JSON -> locate
    the round directory it describes -> train with AdamW + cross-entropy,
    checkpointing the best epoch (by validation accuracy when a val split
    exists, otherwise by train accuracy) -> reload the best checkpoint and
    print test accuracy, macro-F1 and the confusion matrix.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default=os.path.join(PROJECT_ROOT, 'Preprocess_data', 'configs', 'deap_config.json'))
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--num_workers', type=int, default=0)
    parser.add_argument('--gpu', type=str, default='0')
    # ChannelNet hyperparameters (optional overrides)
    parser.add_argument('--temp_channels', type=int, default=10)
    parser.add_argument('--out_channels', type=int, default=50)
    parser.add_argument('--embedding_size', type=int, default=256)
    args = parser.parse_args()

    # Restrict visible GPUs before the first CUDA call; an empty --gpu string
    # deliberately leaves CUDA_VISIBLE_DEVICES untouched.
    if len(args.gpu) > 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Device: {device}")

    with open(args.config, 'r', encoding='utf-8') as f:
        cfg = json.load(f)
    round_dir = derive_round_dir(cfg)
    print(f"Data directory: {round_dir}")

    # The val split is optional; when missing, checkpoint selection below
    # falls back to train accuracy.
    train_x, train_y = load_split(round_dir, 'train')
    val_x, val_y = (None, None)
    val_path = os.path.join(round_dir, 'val.pt')
    if os.path.isfile(val_path):
        val_x, val_y = load_split(round_dir, 'val')
    test_x, test_y = load_split(round_dir, 'test')

    # Prepare input shapes: normalize every split to (N, C, T)
    train_x = prepare_x(train_x)
    if val_x is not None:
        val_x = prepare_x(val_x)
    test_x = prepare_x(test_x)

    # Expect (B, C=32, T=512) for DEAP raw
    c = int(train_x.shape[1])
    t = int(train_x.shape[2])
    # Infer the class count from labels present in train and test combined.
    num_classes = int(max(train_y.max().item(), test_y.max().item()) + 1)

    model = build_model(c, t, num_classes, device, args)
    opt = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    criterion = nn.CrossEntropyLoss()

    train_loader = make_loader(train_x.float(), train_y.long(), args.batch_size, True, args.num_workers)
    val_loader = make_loader(val_x.float(), val_y.long(), args.batch_size, False, args.num_workers) if val_x is not None else None
    test_loader = make_loader(test_x.float(), test_y.long(), args.batch_size, False, args.num_workers)

    # The best checkpoint is stored in a `save/` directory next to this script.
    save_dir = os.path.join(os.path.dirname(__file__), 'save')
    os.makedirs(save_dir, exist_ok=True)
    best_path = os.path.join(save_dir, 'best.pth')
    best_metric = -1.0

    for epoch in range(1, args.epochs + 1):
        model.train()
        total_loss = 0.0
        total_n = 0
        total_acc = 0.0
        for xb, yb in train_loader:
            xb = xb.unsqueeze(1).to(device)  # (B,1,C,T)
            yb = yb.to(device)
            opt.zero_grad(set_to_none=True)
            # The model returns a pair; the first element (embedding) is
            # unused during training.
            emb, logits = model(xb)
            loss = criterion(logits, yb)
            loss.backward()
            opt.step()

            # Accumulate sample-weighted sums so a smaller final batch
            # still averages correctly.
            bs = yb.size(0)
            total_loss += loss.item() * bs
            total_acc += accuracy_from_logits(logits.detach(), yb) * bs
            total_n += bs
        tr_loss = total_loss / max(1, total_n)
        tr_acc = total_acc / max(1, total_n)

        if val_loader is not None:
            model.eval()
            val_loss = 0.0
            val_acc = 0.0
            val_n = 0
            with torch.no_grad():
                for xb, yb in val_loader:
                    xb = xb.unsqueeze(1).to(device)
                    yb = yb.to(device)
                    _, logits = model(xb)
                    loss = criterion(logits, yb)
                    bs = yb.size(0)
                    val_loss += loss.item() * bs
                    val_acc += accuracy_from_logits(logits, yb) * bs
                    val_n += bs
            val_loss /= max(1, val_n)
            val_acc /= max(1, val_n)
            print(f"epoch {epoch:03d} | tr_loss={tr_loss:.4f} tr_acc={tr_acc:.4f} | val_loss={val_loss:.4f} val_acc={val_acc:.4f}")
            metric = val_acc
        else:
            print(f"epoch {epoch:03d} | tr_loss={tr_loss:.4f} tr_acc={tr_acc:.4f}")
            metric = tr_acc

        # `>=` keeps the latest checkpoint among epochs that tie on the metric.
        if metric >= best_metric:
            best_metric = metric
            torch.save(model.state_dict(), best_path)

    # Load best and evaluate on test
    if os.path.isfile(best_path):
        model.load_state_dict(torch.load(best_path, map_location=device))

    model.eval()
    ps, ys = [], []
    with torch.no_grad():
        for xb, yb in test_loader:
            xb = xb.unsqueeze(1).to(device)
            _, logits = model(xb)
            ps.append(logits.argmax(1).cpu())
            ys.append(yb)
    ps = torch.cat(ps, 0)
    ys = torch.cat(ys, 0)

    acc = accuracy(ps, ys)
    f1 = f1_macro(ps, ys)
    # Size the matrix to cover every class seen in either labels or predictions.
    num_classes = int(max(ys.max().item(), ps.max().item()) + 1)
    cm = confusion_matrix(ps, ys, num_classes)

    print(f"Test acc={acc:.4f} f1={f1:.4f}")
    print("Confusion matrix (rows=true, cols=pred):")
    for r in cm.tolist():
        print(" ".join(str(int(x)) for x in r))


# Script entry point: run training/evaluation only when executed directly.
if __name__ == '__main__':
    main()
